diff --git a/Gopkg.lock b/Gopkg.lock index 1c537e4489b..81918d4d6a8 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -4,8 +4,8 @@ [[projects]] name = "cloud.google.com/go" packages = ["compute/metadata"] - revision = "20d4028b8a750c2aca76bf9fefa8ed2d0109b573" - version = "v0.19.0" + revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479" + version = "v0.23.0" [[projects]] name = "github.com/Azure/go-autorest" @@ -26,8 +26,8 @@ [[projects]] name = "github.com/Masterminds/semver" packages = ["."] - revision = "15d8430ab86497c5c0da827b748823945e1cf1e1" - version = "v1.4.0" + revision = "c7af12943936e8c39859482e61f0574c2fd7fc75" + version = "v1.4.2" [[projects]] name = "github.com/PuerkitoBio/purell" @@ -51,7 +51,7 @@ branch = "master" name = "github.com/beorn7/perks" packages = ["quantile"] - revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] name = "github.com/davecgh/go-spew" @@ -62,14 +62,14 @@ [[projects]] name = "github.com/dgrijalva/jwt-go" packages = ["."] - revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29" - version = "v3.1.0" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" [[projects]] name = "github.com/disintegration/imaging" packages = ["."] - revision = "b039796423f87e7caca7f8eaace36878f2cce73d" - version = "1.4.0" + revision = "bbcee2f5c9d5e94ca42c8b50ec847fec64a6c134" + version = "v1.4.2" [[projects]] branch = "master" @@ -92,8 +92,8 @@ ".", "log" ] - revision = "26b41036311f2da8242db402557a0dbd09dc83da" - version = "v2.6.0" + revision = "3658237ded108b4134956c1b3050349d93e7b895" + version = "v2.7.1" [[projects]] name = "github.com/emicklei/go-restful-swagger12" @@ -107,35 +107,29 @@ revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7" version = "v1.0.0" -[[projects]] - name = "github.com/go-ini/ini" - packages = ["."] - revision = "6333e38ac20b8949a8dd68baa3650f4dee8f39f0" - version = "v1.33.0" - [[projects]] branch = "master" name = "github.com/go-openapi/jsonpointer" packages = ["."] - revision = "779f45308c19820f1a69e9a4cd965f496e0da10f" + revision = "3a0015ad55fa9873f41605d3e8f28cd279c32ab2" [[projects]] branch = "master" name = "github.com/go-openapi/jsonreference" packages = ["."] - revision = "36d33bfe519efae5632669801b180bf1a245da3b" + revision = "3fb327e6747da3043567ee86abd02bb6376b6be2" [[projects]] branch = "master" name = "github.com/go-openapi/spec" packages = ["."] - revision = "d8000b5bfbd1147255710505a27c735b6b2ae2ac" + revision = "bcff419492eeeb01f76e77d2ebc714dc97b607f5" [[projects]] branch = "master" name = "github.com/go-openapi/swag" packages = ["."] - revision = "ceb469cb0fdf2d792f28d771bc05da6c606f55e5" + revision = "811b1089cde9dad18d4d0c2d09fbdbf28dbd27a5" [[projects]] name = "github.com/gobwas/glob" @@ -171,7 +165,7 @@ branch = "master" name = "github.com/golang/groupcache" packages = ["lru"] - revision = "66deaeb636dff1ac7d938ce666d090556056a4b0" + revision = "24b0969c4cb722950103eed87108c8d291a8df00" [[projects]] name = "github.com/golang/protobuf" @@ -182,8 +176,8 @@ "ptypes/duration", "ptypes/timestamp" ] - revision = "925541529c1fa6821df4e44ce2723319eb2be768" - version = "v1.0.0" + revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" + version = "v1.1.0" [[projects]] branch = "master" @@ -192,14 +186,14 @@ revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4" [[projects]] - branch = "master" name = "github.com/google/go-jsonnet" packages = [ ".", "ast", "parser" ] - revision = "405726fae23ace72b22c410a77b7bd825608f2c8" + revision = 
"dfddf2b4e3aec377b0dcdf247ff92e7d078b8179" + version = "v0.10.0" [[projects]] branch = "master" @@ -214,8 +208,8 @@ "compiler", "extensions" ] - revision = "ee43cbb60db7bd22502942cccbc39059117352ab" - version = "v0.1.0" + revision = "7c663266750e7d82587642f65e60bc4083f1f84e" + version = "v0.2.0" [[projects]] branch = "master" @@ -229,19 +223,19 @@ "openstack/utils", "pagination" ] - revision = "24d38e255f73b6eac52312031a9450f57e0c6b60" + revision = "5bd6a8f0bae89676ade094aa429146a25bf782e0" [[projects]] name = "github.com/gorilla/context" packages = ["."] - revision = "1ea25387ff6f684839d82767c1733ff4d4d15d0a" - version = "v1.1" + revision = "08b5f424b9271eedf6f9f0ce86cb9396ed337a42" + version = "v1.1.1" [[projects]] name = "github.com/gorilla/mux" packages = ["."] - revision = "53c1911da2b537f792e7cafcb446b05ffe33b996" - version = "v1.6.1" + revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" + version = "v1.6.2" [[projects]] branch = "master" @@ -286,8 +280,8 @@ [[projects]] name = "github.com/imdario/mergo" packages = ["."] - revision = "163f41321a19dd09362d4c63cc2489db2015f1f4" - version = "0.3.2" + revision = "9d5f1277e9a8ed20c3684bda8fde67c05628518c" + version = "v0.3.4" [[projects]] name = "github.com/inconshreveable/mousetrap" @@ -304,8 +298,8 @@ [[projects]] name = "github.com/json-iterator/go" packages = ["."] - revision = "3353055b2a1a5ae1b6a8dfde887a524e7088f3a2" - version = "1.1.2" + revision = "ca39e5af3ece67bbcda3d0f4f56a8e24d9f2dad4" + version = "1.1.3" [[projects]] name = "github.com/juju/ratelimit" @@ -339,7 +333,7 @@ "jlexer", "jwriter" ] - revision = "4a8a4c12c4d1b0c0a7de630426ce6dcb07141b17" + revision = "3fdea8d05856a0c8df22ed4bc71b3219245e4485" [[projects]] name = "github.com/mattn/go-isatty" @@ -356,8 +350,8 @@ [[projects]] name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] - revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" - version = "v1.0.0" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" [[projects]] name = "github.com/modern-go/concurrent" @@ -409,7 +403,7 @@ "internal/bitbucket.org/ww/goautoneg", "model" ] - revision = "6fb6fce6f8b75884b92e1889c150403fc0872c5e" + revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" [[projects]] branch = "master" @@ -420,7 +414,7 @@ "nfs", "xfs" ] - revision = "1c7ff3de94ae006f58cba483a4c9c6d7c61e1d98" + revision = "94663424ae5ae9856b40a9f170762b4197024661" [[projects]] name = "github.com/rakyll/statik" @@ -437,20 +431,20 @@ [[projects]] name = "github.com/sirupsen/logrus" packages = ["."] - revision = "d682213848ed68c0a260ca37d6dd5ace8423f5ba" - version = "v1.0.4" + revision = "c155da19408a8799da419ed3eeb0cb5db0ad5dbc" + version = "v1.0.5" [[projects]] name = "github.com/spf13/cobra" packages = ["."] - revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b" - version = "v0.0.1" + revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" + version = "v0.0.3" [[projects]] name = "github.com/spf13/pflag" packages = ["."] - revision = "e57e3eeb33f795204c1ca35f56c44f83227c6e66" - version = "v1.0.0" + revision = "583c0c0531f06d5278b7d917446061adc344b5cd" + version = "v1.0.1" [[projects]] name = "github.com/stretchr/objx" @@ -508,7 +502,7 @@ "openpgp/s2k", "ssh/terminal" ] - revision = "85f98707c97e11569271e4d9b3d397e079c4f4d0" + revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" [[projects]] branch = "master" @@ -518,7 +512,7 @@ "tiff", "tiff/lzw" ] - revision = "12117c17ca67ffa1ce22e9409f3b0b0a93ac08c7" + revision = "af66defab954cb421ca110193eed9477c8541e2a" 
[[projects]] branch = "master" @@ -526,12 +520,14 @@ packages = [ "context", "context/ctxhttp", + "http/httpguts", "http2", "http2/hpack", "idna", - "lex/httplex" + "internal/timeseries", + "trace" ] - revision = "07e8617a6db2368fa55d4616f371ee1b1403c817" + revision = "1e491301e022f8f977054da4c2d852decd59571f" [[projects]] branch = "master" @@ -543,7 +539,7 @@ "jws", "jwt" ] - revision = "2f32c3ac0fa4fb807a0fcefb0b6f2468a0d99bd0" + revision = "1e0a3fa8ba9a5c9eb35c271780101fdaf1b205d7" [[projects]] branch = "master" @@ -552,7 +548,7 @@ "unix", "windows" ] - revision = "349b81fb5c64ec1734eb6ee148df25459ea48517" + revision = "9527bec2660bd847c050fda93a0f0c6dee0800bb" [[projects]] name = "golang.org/x/text" @@ -593,11 +589,49 @@ revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" version = "v1.0.0" +[[projects]] + branch = "master" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + revision = "81158efcc9f219c511e4d3c0d61a0e6e49c01a24" + +[[projects]] + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "channelz", + "codes", + "connectivity", + "credentials", + "encoding", + "encoding/proto", + "grpclb/grpc_lb_v1/messages", + "grpclog", + "health/grpc_health_v1", + "internal", + "keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + "transport" + ] + revision = "7a6a684ca69eb4cae85ad0a484f2e531598c047b" + version = "v1.12.2" + [[projects]] name = "gopkg.in/inf.v0" packages = ["."] - revision = "3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4" - version = "v0.9.0" + revision = "d2d2541c53f18d2a059457998ce2876cc8e67cbf" + version = "v0.9.1" [[projects]] branch = "v2" @@ -614,8 +648,8 @@ [[projects]] name = "gopkg.in/yaml.v2" packages = ["."] - revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5" - version = "v2.1.1" + revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" + version = "v2.2.1" [[projects]] name = "k8s.io/api" @@ -849,11 +883,14 @@ packages = [ "pkg/chartutil", "pkg/getter", + "pkg/helm", "pkg/helm/environment", "pkg/helm/helmpath", "pkg/ignore", "pkg/plugin", "pkg/proto/hapi/chart", + "pkg/proto/hapi/release", + "pkg/proto/hapi/services", "pkg/proto/hapi/version", "pkg/provenance", "pkg/repo", @@ -862,18 +899,18 @@ "pkg/urlutil", "pkg/version" ] - revision = "6af75a8fd72e2aa18a2b278cfe5c7a1c5feca7f2" - version = "v2.8.1" + revision = "20adb27c7c5868466912eebdf6664e7390ebe710" + version = "v2.9.1" [[projects]] branch = "master" name = "k8s.io/kube-openapi" packages = ["pkg/common"] - revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf" + revision = "8a9b82f00b3a86eac24681da3f9fe6c34c01cea2" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "2ae98d1f21d5c51dc9f9e56416019c2c82d341da040f812784efdfb771fd039b" + inputs-digest = "51a3d63f3a76923268f58bb675a34afa4104db0bf48aaa746223f22940718c20" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index 3e129012883..dd8ad5bdc0c 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -5,10 +5,6 @@ name = "github.com/ghodss/yaml" version = "1.0.0" -[[constraint]] - name = "github.com/go-ini/ini" - version = "1.32.0" - [[constraint]] branch = "master" name = "github.com/gosuri/uitable" @@ -25,6 +21,10 @@ name = "github.com/ksonnet/kubecfg" revision = "8634737e0b22b01bb93ffeb09a195d0a6d71a6e5" +[[constraint]] + name = "google.golang.org/grpc" + version = "1.7.2" + [[constraint]] name = "k8s.io/client-go" version = "5.0.1" @@ -39,6 
+39,10 @@ name = "k8s.io/api" revision = "6c6dac0277229b9e9578c5ca3f74a4345d35cdc2" +[[constraint]] + name = "k8s.io/helm" + version = "2.9.1" + [[override]] name = "github.com/Azure/go-autorest" revision = "58f6f26e200fa5dfb40c9cd1c83f3e2c860d779d" diff --git a/Makefile b/Makefile index 8d2f6afb891..5441a8a7cc7 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,9 @@ kubeapps/%: kubeapps/dashboard: docker build -t kubeapps/dashboard:$(VERSION) -f dashboard/Dockerfile dashboard/ +tiller-proxy: + docker build -t kubeapps/tiller-proxy -f ./cmd/tiller-proxy/Dockerfile . + test: $(EMBEDDED_STATIC) $(GO) test $(GO_PACKAGES) diff --git a/cmd/kubeapps/down.go b/cmd/kubeapps/down.go index 5bfd5a539a0..3d2b9ef78fc 100644 --- a/cmd/kubeapps/down.go +++ b/cmd/kubeapps/down.go @@ -25,6 +25,8 @@ import ( k8sErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" + + yamlUtils "github.com/kubeapps/kubeapps/pkg/yaml" ) var downCmd = &cobra.Command{ @@ -65,7 +67,7 @@ var downCmd = &cobra.Command{ if err != nil { return fmt.Errorf("can't read kubeapps manifest: %v", err) } - objs, err := parseObjects(manifest) + objs, err := yamlUtils.ParseObjects(manifest) if err != nil { return fmt.Errorf("can't parse kubeapps manifest: %v", err) } diff --git a/cmd/kubeapps/root.go b/cmd/kubeapps/root.go index c117abd1f6c..8cd2f7d7741 100644 --- a/cmd/kubeapps/root.go +++ b/cmd/kubeapps/root.go @@ -17,24 +17,18 @@ limitations under the License. package kubeapps import ( - "bufio" "crypto/rand" "encoding/base64" "errors" "fmt" - "io" "os" "path/filepath" - "strings" "github.com/ksonnet/kubecfg/utils" "github.com/sirupsen/logrus" "github.com/spf13/cobra" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/yaml" "k8s.io/client-go/discovery" "k8s.io/client-go/dynamic" "k8s.io/client-go/rest" @@ -92,34 +86,6 @@ func logLevel(verbosity string) logrus.Level { } } -func parseObjects(manifest string) ([]*unstructured.Unstructured, error) { - r := strings.NewReader(manifest) - decoder := yaml.NewYAMLReader(bufio.NewReader(r)) - ret := []runtime.Object{} - for { - bytes, err := decoder.Read() - if err == io.EOF { - break - } else if err != nil { - return nil, err - } - if len(bytes) == 0 { - continue - } - jsondata, err := yaml.ToJSON(bytes) - if err != nil { - return nil, err - } - obj, _, err := unstructured.UnstructuredJSONScheme.Decode(jsondata, nil, nil) - if err != nil { - return nil, err - } - ret = append(ret, obj) - } - - return utils.FlattenToV1(ret), nil -} - func restClientPool() (dynamic.ClientPool, discovery.DiscoveryInterface, error) { conf, err := buildOutOfClusterConfig() if err != nil { diff --git a/cmd/kubeapps/root_test.go b/cmd/kubeapps/root_test.go index fd5e603755b..a4249edcf40 100644 --- a/cmd/kubeapps/root_test.go +++ b/cmd/kubeapps/root_test.go @@ -22,45 +22,6 @@ import ( "testing" ) -func TestParseObjectsSuccess(t *testing.T) { - m1 := `apiVersion: v1 -kind: Namespace -metadata: - annotations: {} - labels: - name: kubeless - name: kubeless` - rs, err := parseObjects(m1) - if err != nil { - t.Error(err) - } - if len(rs) != 1 { - t.Errorf("Expected 1 yaml element, got %v", len(rs)) - } - - // validate some fields of the parsed object - if rs[0].GetAPIVersion() != "v1" { - t.Errorf("Expected apiversion=v1, go %s", rs[0].GetAPIVersion()) - } - if rs[0].GetKind() != "Namespace" { - 
t.Errorf("Expected kind = Namespace, go %s", rs[0].GetKind()) - } -} - -func TestParseObjectFailure(t *testing.T) { - m2 := `apiVersion: v1 -kind: Namespace -metadata: - annotations: {} - labels: - name: kubeless - name: kubeless` - _, err := parseObjects(m2) - if err == nil { - t.Error("Expected parse fail, got success") - } -} - func TestGenerateEncodedRandomPassword(t *testing.T) { got, err := generateEncodedRandomPassword(12) if err != nil { diff --git a/cmd/kubeapps/up.go b/cmd/kubeapps/up.go index 666d16483a3..b217b1d3803 100644 --- a/cmd/kubeapps/up.go +++ b/cmd/kubeapps/up.go @@ -30,7 +30,6 @@ import ( "github.com/gosuri/uitable" "github.com/ksonnet/kubecfg/pkg/kubecfg" "github.com/ksonnet/kubecfg/utils" - "github.com/kubeapps/kubeapps/pkg/gke" "github.com/spf13/cobra" "k8s.io/api/apps/v1beta1" "k8s.io/api/core/v1" @@ -40,6 +39,9 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/discovery" "k8s.io/client-go/kubernetes" + + "github.com/kubeapps/kubeapps/pkg/gke" + yamlUtils "github.com/kubeapps/kubeapps/pkg/yaml" ) const ( @@ -113,7 +115,7 @@ List of components that kubeapps up installs: return fmt.Errorf("can't read kubeapps manifest: %v", err) } - objs, err := parseObjects(manifest) + objs, err := yamlUtils.ParseObjects(manifest) if err != nil { return fmt.Errorf("can't parse kubeapps manifest: %v", err) } diff --git a/cmd/tiller-proxy/.gitignore b/cmd/tiller-proxy/.gitignore new file mode 100644 index 00000000000..6ef760cbd1d --- /dev/null +++ b/cmd/tiller-proxy/.gitignore @@ -0,0 +1 @@ +proxy-static diff --git a/cmd/tiller-proxy/Dockerfile b/cmd/tiller-proxy/Dockerfile new file mode 100644 index 00000000000..6c552519cfe --- /dev/null +++ b/cmd/tiller-proxy/Dockerfile @@ -0,0 +1,9 @@ +FROM quay.io/deis/go-dev:v1.8.2 as builder +COPY . /go/src/github.com/kubeapps/kubeapps +WORKDIR /go/src/github.com/kubeapps/kubeapps +RUN CGO_ENABLED=0 go build -a -installsuffix cgo ./cmd/tiller-proxy + +FROM scratch +COPY --from=builder /go/src/github.com/kubeapps/kubeapps/tiller-proxy /proxy +EXPOSE 8080 +CMD ["/proxy"] diff --git a/cmd/tiller-proxy/Dockerfile.dev b/cmd/tiller-proxy/Dockerfile.dev new file mode 100644 index 00000000000..fe0313b974e --- /dev/null +++ b/cmd/tiller-proxy/Dockerfile.dev @@ -0,0 +1,4 @@ +FROM alpine:3.6 +RUN apk --no-cache add ca-certificates +COPY ./proxy-static /proxy +CMD ["/proxy"] diff --git a/cmd/tiller-proxy/README.md b/cmd/tiller-proxy/README.md new file mode 100644 index 00000000000..a94029f737f --- /dev/null +++ b/cmd/tiller-proxy/README.md @@ -0,0 +1,44 @@ +# Tiller Proxy + +This proxy is a service for Kubeapps that connects the Dashboard with Tiller. The goal of this Proxy is to provide a secure proxy for authenticated users to deploy, upgrade and delete charts in different namespaces. + +Part of the logic of this tool has been extracted from [helm-CRD](https://github.com/bitnami-labs/helm-crd). That tool has been deprecated in Kubeapps to avoid having to synchronize the state of a release in two different places (Tiller and the CRD object). + +The client should provide the header `Authorization: Bearer TOKEN` being TOKEN the Kubernetes API Token in order to perform any action. + +# Configuration + +It is possible to configure this proxy with the following flags: + +``` + --debug enable verbose output + --home string location of your Helm config. Overrides $HELM_HOME (default "/Users/andresmartinez/.helm") + --host string address of Tiller. 
Overrides $HELM_HOST + --kube-context string name of the kubeconfig context to use + --tiller-connection-timeout int the duration (in seconds) Helm will wait to establish a connection to tiller (default 300) + --tiller-namespace string namespace of Tiller (default "kube-system") +``` + +# Routes + +This proxy provides 6 different routes: + + - `GET` `/v1/releases`: List all the releases managed by Tiller + - `GET` `/v1/namespaces/{namespace}/releases`: List all the releases within a namespace + - `POST` `/v1/namespaces/{namespace}/releases`: Create a new release + - `GET` `/v1/namespaces/{namespace}/releases/{release}`: Get release info + - `PUT` `/v1/namespaces/{namespace}/releases/{release}`: Update release info + - `DELETE` `/v1/namespaces/{namespace}/releases/{release}`: Delete a release + +# Enabling authorization + +By default, authorization for any request is enabled (it can be disabled using the `--disable-auth` flag). If enabled, the client should have permissions to: + + - "Read" access to all the release resources in a release when doing an HTTP GET over a specific release. + - "Create" access to all the release resources in a release when doing an HTTP POST. + - "Create", "Update" and "Delete" permissions to all the release resources when doing an HTTP PUT to upgrade a release. + - "Delete" permissions to all the release resources when doing an HTTP DELETE. + +Note that the user only needs a valid token in order to list releases. + +Right now, the only supported method for authentication is using a bearer token. diff --git a/cmd/tiller-proxy/handler.go b/cmd/tiller-proxy/handler.go new file mode 100644 index 00000000000..46119a72a42 --- /dev/null +++ b/cmd/tiller-proxy/handler.go @@ -0,0 +1,262 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
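As a usage sketch for the routes listed in the README above: all values below are placeholders (host, port, namespace, token and chart name are not taken from this PR), the JSON fields mirror the `Details` struct added in `pkg/chart/chart.go`, and the paths assume the `/v1` prefix wired up in `cmd/tiller-proxy/main.go`.

```
# Assumes the proxy is reachable on localhost:8080 (e.g. via a port-forward) and that
# $TOKEN holds a Kubernetes API token with enough RBAC permissions in the namespace.
curl -X POST http://localhost:8080/v1/namespaces/default/releases \
  -H "Authorization: Bearer $TOKEN" \
  -H "Content-Type: application/json" \
  -d '{"chartName": "wordpress", "releaseName": "my-blog", "version": "1.0.0", "values": "replicaCount: 1"}'

# List the releases in that namespace
curl -H "Authorization: Bearer $TOKEN" http://localhost:8080/v1/namespaces/default/releases
```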
+*/ + +package main + +import ( + "context" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/gorilla/mux" + "github.com/kubeapps/common/response" + log "github.com/sirupsen/logrus" + "github.com/urfave/negroni" + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/proto/hapi/chart" + + "github.com/kubeapps/kubeapps/pkg/auth" + chartUtils "github.com/kubeapps/kubeapps/pkg/chart" +) + +const ( + defaultTimeoutSeconds = 180 +) + +var ( + netClient *http.Client +) + +func init() { + netClient = &http.Client{ + Timeout: time.Second * defaultTimeoutSeconds, + } +} + +// Context key type for request contexts +type contextKey int + +// userKey is the context key for the User data in the request context +const userKey contextKey = 0 + +// authGate implements middleware to check if the user is logged in before continuing +func authGate() negroni.HandlerFunc { + return func(w http.ResponseWriter, req *http.Request, next http.HandlerFunc) { + authHeader := strings.Split(req.Header.Get("Authorization"), "Bearer ") + if len(authHeader) != 2 { + response.NewErrorResponse(http.StatusUnauthorized, "Unauthorized").Write(w) + return + } + userAuth, err := auth.NewAuth(authHeader[1]) + if err != nil { + response.NewErrorResponse(http.StatusInternalServerError, err.Error()).Write(w) + return + } + err = userAuth.Validate() + if err != nil { + response.NewErrorResponse(http.StatusUnauthorized, err.Error()).Write(w) + return + } + ctx := context.WithValue(req.Context(), userKey, *userAuth) + next(w, req.WithContext(ctx)) + } +} + +// Params a key-value map of path params +type Params map[string]string + +// WithParams can be used to wrap handlers to take an extra arg for path params +type WithParams func(http.ResponseWriter, *http.Request, Params) + +func (h WithParams) ServeHTTP(w http.ResponseWriter, req *http.Request) { + vars := mux.Vars(req) + h(w, req, vars) +} + +func isNotFound(err error) bool { + return strings.Contains(err.Error(), "not found") +} + +func isAlreadyExists(err error) bool { + return strings.Contains(err.Error(), "is still in use") || strings.Contains(err.Error(), "already exists") +} + +func isForbidden(err error) bool { + return strings.Contains(err.Error(), "Unauthorized") +} + +func errorCode(err error) int { + errCode := http.StatusInternalServerError + if isAlreadyExists(err) { + errCode = http.StatusConflict + } else if isNotFound(err) { + errCode = http.StatusNotFound + } else if isForbidden(err) { + errCode = http.StatusForbidden + } + return errCode +} + +func getChart(req *http.Request) (*chartUtils.Details, *chart.Chart, error) { + defer req.Body.Close() + body, err := ioutil.ReadAll(req.Body) + if err != nil { + return nil, nil, err + } + chartDetails, err := chartUtils.ParseDetails(body) + if err != nil { + return nil, nil, err + } + ch, err := chartUtils.GetChart(chartDetails, kubeClient, netClient, chartutil.LoadArchive) + if err != nil { + return nil, nil, err + } + return chartDetails, ch, nil +} + +func logStatus(name string) { + status, err := proxy.GetReleaseStatus(name) + if err != nil { + log.Printf("Unable to fetch release status of %s: %v", name, err) + } else { + log.Printf("Release status: %s", status) + } +} + +func createRelease(w http.ResponseWriter, req *http.Request, params Params) { + log.Printf("Creating Helm Release") + chartDetails, ch, err := getChart(req) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + if !disableAuth { + manifest, err := proxy.ResolveManifest(params["namespace"], 
chartDetails.Values, ch) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + userAuth := req.Context().Value(userKey).(auth.UserAuth) + err = userAuth.CanI(params["namespace"], "create", manifest) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + } + rel, err := proxy.CreateRelease(chartDetails.ReleaseName, params["namespace"], chartDetails.Values, ch) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + log.Printf("Installed release %s", rel.Name) + logStatus(rel.Name) + response.NewDataResponse(*rel).Write(w) +} + +func upgradeRelease(w http.ResponseWriter, req *http.Request, params Params) { + log.Printf("Upgrading Helm Release") + chartDetails, ch, err := getChart(req) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + if !disableAuth { + manifest, err := proxy.ResolveManifest(params["namespace"], chartDetails.Values, ch) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + userAuth := req.Context().Value(userKey).(auth.UserAuth) + err = userAuth.CanI(params["namespace"], "upgrade", manifest) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + } + rel, err := proxy.UpdateRelease(params["releaseName"], params["namespace"], chartDetails.Values, ch) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + log.Printf("Upgraded release %s", rel.Name) + logStatus(rel.Name) + response.NewDataResponse(*rel).Write(w) +} + +func listAllReleases(w http.ResponseWriter, req *http.Request) { + apps, err := proxy.ListReleases("") + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + response.NewDataResponse(apps).Write(w) +} + +func listReleases(w http.ResponseWriter, req *http.Request, params Params) { + apps, err := proxy.ListReleases(params["namespace"]) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + response.NewDataResponse(apps).Write(w) +} + +func getRelease(w http.ResponseWriter, req *http.Request, params Params) { + rel, err := proxy.GetRelease(params["releaseName"], params["namespace"]) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + if !disableAuth { + manifest, err := proxy.ResolveManifest(params["namespace"], rel.Config.Raw, rel.Chart) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + userAuth := req.Context().Value(userKey).(auth.UserAuth) + err = userAuth.CanI(params["namespace"], "get", manifest) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + } + response.NewDataResponse(*rel).Write(w) +} + +func deleteRelease(w http.ResponseWriter, req *http.Request, params Params) { + if !disableAuth { + rel, err := proxy.GetRelease(params["releaseName"], params["namespace"]) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + manifest, err := proxy.ResolveManifest(params["namespace"], rel.Config.Raw, rel.Chart) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + userAuth := req.Context().Value(userKey).(auth.UserAuth) + err = userAuth.CanI(params["namespace"], "delete", manifest) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + } + err := proxy.DeleteRelease(params["releaseName"],
params["namespace"]) + if err != nil { + response.NewErrorResponse(errorCode(err), err.Error()).Write(w) + return + } + w.Write([]byte("OK")) +} diff --git a/cmd/tiller-proxy/main.go b/cmd/tiller-proxy/main.go new file mode 100644 index 00000000000..d0649ee4e3a --- /dev/null +++ b/cmd/tiller-proxy/main.go @@ -0,0 +1,116 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "net/http" + "os" + + "github.com/gorilla/mux" + "github.com/heptiolabs/healthcheck" + log "github.com/sirupsen/logrus" + "github.com/spf13/pflag" + "github.com/urfave/negroni" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/helm/pkg/helm" + "k8s.io/helm/pkg/helm/environment" + + tillerProxy "github.com/kubeapps/kubeapps/pkg/proxy" +) + +var ( + settings environment.EnvSettings + proxy *tillerProxy.Proxy + kubeClient kubernetes.Interface + disableAuth bool +) + +func init() { + settings.AddFlags(pflag.CommandLine) + pflag.BoolVar(&disableAuth, "disable-auth", false, "Disable authorization check") +} + +func main() { + pflag.Parse() + + // set defaults from environment + settings.Init(pflag.CommandLine) + + config, err := rest.InClusterConfig() + if err != nil { + log.Fatalf("Unable to get cluter config: %v", err) + } + + kubeClient, err = kubernetes.NewForConfig(config) + if err != nil { + log.Fatalf("Unable to create a kubernetes client: %v", err) + } + + log.Printf("Using tiller host: %s", settings.TillerHost) + helmClient := helm.NewClient(helm.Host(settings.TillerHost)) + err = helmClient.PingTiller() + if err != nil { + log.Fatalf("Unable to connect to Tiller: %v", err) + } + + proxy = tillerProxy.NewProxy(kubeClient, helmClient) + + r := mux.NewRouter() + + // Healthcheck + health := healthcheck.NewHandler() + r.Handle("/live", health) + r.Handle("/ready", health) + + authGate := authGate() + + // Routes + apiv1 := r.PathPrefix("/v1").Subrouter() + apiv1.Methods("GET").Path("/releases").HandlerFunc(listAllReleases) + apiv1.Methods("GET").Path("/namespaces/{namespace}/releases").Handler(WithParams(listReleases)) + apiv1.Methods("POST").Path("/namespaces/{namespace}/releases").Handler(negroni.New( + authGate, + negroni.Wrap(WithParams(createRelease)), + )) + apiv1.Methods("GET").Path("/namespaces/{namespace}/releases/{releaseName}").Handler(negroni.New( + authGate, + negroni.Wrap(WithParams(getRelease)), + )) + apiv1.Methods("PUT").Path("/namespaces/{namespace}/releases/{releaseName}").Handler(negroni.New( + authGate, + negroni.Wrap(WithParams(upgradeRelease)), + )) + apiv1.Methods("DELETE").Path("/namespaces/{namespace}/releases/{releaseName}").Handler(negroni.New( + authGate, + negroni.Wrap(WithParams(deleteRelease)), + )) + + n := negroni.Classic() + n.UseHandler(r) + + port := os.Getenv("PORT") + if port == "" { + port = "8080" + } + addr := ":" + port + log.WithFields(log.Fields{"addr": addr}).Info("Started Tiller Proxy") + err = http.ListenAndServe(addr, n) + if err != nil { + log.Fatalf("Unable to start the server: %v", err) + } +} diff --git 
a/cmd/tiller-proxy/tiller-proxy b/cmd/tiller-proxy/tiller-proxy new file mode 100755 index 00000000000..b51189fb6ad Binary files /dev/null and b/cmd/tiller-proxy/tiller-proxy differ diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go new file mode 100644 index 00000000000..b56c9b57c05 --- /dev/null +++ b/pkg/auth/auth.go @@ -0,0 +1,162 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package auth + +import ( + "fmt" + + authorizationapi "k8s.io/api/authorization/v1" + discovery "k8s.io/client-go/discovery" + "k8s.io/client-go/kubernetes" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + "k8s.io/client-go/rest" + + yamlUtils "github.com/kubeapps/kubeapps/pkg/yaml" +) + +// UserAuth contains information to check user permissions +type UserAuth struct { + authCli authorizationv1.AuthorizationV1Interface + discoveryCli discovery.DiscoveryInterface +} + +type resource struct { + APIVersion string + Kind string + Namespace string +} + +// NewAuth creates an auth agent +func NewAuth(token string) (*UserAuth, error) { + config, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + // Overwrite default token + config.BearerToken = token + kubeClient, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + authCli := kubeClient.AuthorizationV1() + discoveryCli := kubeClient.Discovery() + + return &UserAuth{authCli, discoveryCli}, nil +} + +// Validate checks if the given token is valid +func (u *UserAuth) Validate() error { + _, err := u.authCli.SelfSubjectRulesReviews().Create(&authorizationapi.SelfSubjectRulesReview{ + Spec: authorizationapi.SelfSubjectRulesReviewSpec{ + Namespace: "default", + }, + }) + return err +} + +func resolve(discoveryCli discovery.DiscoveryInterface, groupVersion, kind string) (string, error) { + resourceList, err := discoveryCli.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + return "", err + } + for _, r := range resourceList.APIResources { + if r.Kind == kind { + return r.Name, nil + } + } + return "", fmt.Errorf("Unable to find the kind %s in the resource group %s", kind, groupVersion) +} + +func (u *UserAuth) canPerform(verb, group, resource, namespace string) (bool, error) { + res, err := u.authCli.SelfSubjectAccessReviews().Create(&authorizationapi.SelfSubjectAccessReview{ + Spec: authorizationapi.SelfSubjectAccessReviewSpec{ + ResourceAttributes: &authorizationapi.ResourceAttributes{ + Group: group, + Resource: resource, + Verb: verb, + Namespace: namespace, + }, + }, + }) + if err != nil { + return false, err + } + return res.Status.Allowed, nil +} + +func (u *UserAuth) getResourcesToCheck(namespace, manifest string) ([]resource, error) { + objs, err := yamlUtils.ParseObjects(manifest) + if err != nil { + return []resource{}, err + } + resourcesToCheck := map[string]*resource{} + result := []resource{} + for _, obj := range objs { + // Object can specify a different namespace, if not use the default one + ns := obj.GetNamespace() + if ns == "" { + ns = namespace + } + resourceToCheck :=
fmt.Sprintf("%s/%s/%s", ns, obj.GetAPIVersion(), obj.GetKind()) + if resourcesToCheck[resourceToCheck] == nil { + r := resource{obj.GetAPIVersion(), obj.GetKind(), ns} + resourcesToCheck[resourceToCheck] = &r + result = append(result, r) + } + } + return result, nil +} + +func (u *UserAuth) isAllowed(verb string, itemsToCheck []resource) error { + for _, i := range itemsToCheck { + resource, err := resolve(u.discoveryCli, i.APIVersion, i.Kind) + if err != nil { + return err + } + group := i.APIVersion + if group == "v1" { + // The group should be empty for the core API group + group = "" + } + allowed, _ := u.canPerform(verb, group, resource, i.Namespace) + if !allowed { + return fmt.Errorf("Unauthorized to %s %s/%s in the %s namespace", verb, i.APIVersion, resource, i.Namespace) + } + } + return nil +} + +// CanI returns if the user can perform the given action with the given chart and parameters +func (u *UserAuth) CanI(namespace, action, manifest string) error { + resources, err := u.getResourcesToCheck(namespace, manifest) + if err != nil { + return err + } + switch action { + case "upgrade": + // For upgrading a chart the user should be able to create, update and delete resources + for _, v := range []string{"create", "update", "delete"} { + err = u.isAllowed(v, resources) + if err != nil { + return err + } + } + default: + err := u.isAllowed(action, resources) + if err != nil { + return err + } + } + return nil +} diff --git a/pkg/auth/auth_test.go b/pkg/auth/auth_test.go new file mode 100644 index 00000000000..57b6623fa4e --- /dev/null +++ b/pkg/auth/auth_test.go @@ -0,0 +1,84 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package auth + +import ( + "strings" + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + fakediscovery "k8s.io/client-go/discovery/fake" + "k8s.io/client-go/kubernetes/fake" +) + +func TestCanI(t *testing.T) { + resourceList := metav1.APIResourceList{ + GroupVersion: "v1", + APIResources: []metav1.APIResource{ + {Name: "pods", Kind: "Pod"}, + }, + } + cli := fake.NewSimpleClientset() + fakeDiscovery, ok := cli.Discovery().(*fakediscovery.FakeDiscovery) + if !ok { + t.Fatalf("couldn't convert Discovery() to *FakeDiscovery") + } + fakeDiscovery.Resources = []*metav1.APIResourceList{&resourceList} + auth := UserAuth{ + authCli: cli.AuthorizationV1(), + discoveryCli: cli.Discovery(), + } + manifest := `--- +apiVersion: v1 +kind: Pod +` + err := auth.CanI("foo", "create", manifest) + // Fake client returns an empty result so it will deny any request + if !strings.Contains(err.Error(), "Unauthorized to create v1/pods in the foo namespace") { + t.Errorf("Unexpected error: %v", err) + } +} + +func TestNamespacedCanI(t *testing.T) { + resourceList := metav1.APIResourceList{ + GroupVersion: "v1", + APIResources: []metav1.APIResource{ + {Name: "pods", Kind: "Pod"}, + }, + } + cli := fake.NewSimpleClientset() + fakeDiscovery, ok := cli.Discovery().(*fakediscovery.FakeDiscovery) + if !ok { + t.Fatalf("couldn't convert Discovery() to *FakeDiscovery") + } + fakeDiscovery.Resources = []*metav1.APIResourceList{&resourceList} + auth := UserAuth{ + authCli: cli.AuthorizationV1(), + discoveryCli: cli.Discovery(), + } + manifest := `--- +apiVersion: v1 +kind: Pod +metadata: + namespace: bar +` + err := auth.CanI("foo", "create", manifest) + // Fake client returns an empty result so it will deny any request + if !strings.Contains(err.Error(), "Unauthorized to create v1/pods in the bar namespace") { + t.Errorf("Unexpected error: %v", err) + } +} diff --git a/pkg/chart/chart.go b/pkg/chart/chart.go new file mode 100644 index 00000000000..ac99db37222 --- /dev/null +++ b/pkg/chart/chart.go @@ -0,0 +1,238 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package chart + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "strings" + + "github.com/ghodss/yaml" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/repo" +) + +const ( + defaultNamespace = metav1.NamespaceSystem + defaultRepoURL = "https://kubernetes-charts.storage.googleapis.com" +) + +// Details contains the information to retrieve a Chart +type Details struct { + // RepoURL is the URL of the repository. Defaults to stable repo. + RepoURL string `json:"repoUrl,omitempty"` + // ChartName is the name of the chart within the repo. + ChartName string `json:"chartName"` + // ReleaseName is the Name of the release given to Tiller. + ReleaseName string `json:"releaseName"` + // Version is the chart version. 
+ Version string `json:"version"` + // Auth is the authentication. + Auth Auth `json:"auth,omitempty"` + // Values is a string containing (unparsed) YAML values. + Values string `json:"values,omitempty"` +} + +// Auth contains the information to authenticate against a private registry +type Auth struct { + // Header is header based Authorization + Header *AuthHeader `json:"header,omitempty"` +} + +// AuthHeader contains the secret information for authenticate +type AuthHeader struct { + // Selects a key of a secret in the pod's namespace + SecretKeyRef corev1.SecretKeySelector `json:"secretKeyRef,omitempty"` +} + +// HTTPClient Interface to perform HTTP requests +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +func getReq(rawURL, authHeader string) (*http.Request, error) { + parsedURL, err := url.ParseRequestURI(rawURL) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", parsedURL.String(), nil) + if err != nil { + return nil, err + } + + if len(authHeader) > 0 { + req.Header.Set("Authorization", authHeader) + } + return req, nil +} + +func readResponseBody(res *http.Response) ([]byte, error) { + if res != nil { + defer res.Body.Close() + } + + if res.StatusCode != http.StatusOK { + return nil, fmt.Errorf("chart download request failed") + } + + body, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, err + } + return body, nil +} + +func parseIndex(data []byte) (*repo.IndexFile, error) { + index := &repo.IndexFile{} + err := yaml.Unmarshal(data, index) + if err != nil { + return index, err + } + index.SortEntries() + return index, nil +} + +// fetchRepoIndex returns a Helm repository +func fetchRepoIndex(netClient *HTTPClient, repoURL string, authHeader string) (*repo.IndexFile, error) { + req, err := getReq(repoURL, authHeader) + if err != nil { + return nil, err + } + + res, err := (*netClient).Do(req) + if err != nil { + return nil, err + } + data, err := readResponseBody(res) + if err != nil { + return nil, err + } + + return parseIndex(data) +} + +func resolveChartURL(index, chart string) (string, error) { + indexURL, err := url.Parse(strings.TrimSpace(index)) + if err != nil { + return "", err + } + chartURL, err := indexURL.Parse(strings.TrimSpace(chart)) + if err != nil { + return "", err + } + return chartURL.String(), nil +} + +// findChartInRepoIndex returns the URL of a chart given a Helm repository and its name and version +func findChartInRepoIndex(repoIndex *repo.IndexFile, repoURL, chartName, chartVersion string) (string, error) { + errMsg := fmt.Sprintf("chart %q", chartName) + if chartVersion != "" { + errMsg = fmt.Sprintf("%s version %q", errMsg, chartVersion) + } + cv, err := repoIndex.Get(chartName, chartVersion) + if err != nil { + return "", fmt.Errorf("%s not found in repository", errMsg) + } + if len(cv.URLs) == 0 { + return "", fmt.Errorf("%s has no downloadable URLs", errMsg) + } + return resolveChartURL(repoURL, cv.URLs[0]) +} + +// LoadChart should return a Chart struct from an IOReader +type LoadChart func(in io.Reader) (*chart.Chart, error) + +// fetchChart returns the Chart content given an URL and the auth header if needed +func fetchChart(netClient *HTTPClient, chartURL, authHeader string, load LoadChart) (*chart.Chart, error) { + req, err := getReq(chartURL, authHeader) + if err != nil { + return nil, err + } + + res, err := (*netClient).Do(req) + if err != nil { + return nil, err + } + data, err := readResponseBody(res) + if err != nil { + return nil, err + } + return 
load(bytes.NewReader(data)) +} + +// ParseDetails return Chart details +func ParseDetails(data []byte) (*Details, error) { + details := &Details{} + err := json.Unmarshal(data, details) + if err != nil { + return nil, fmt.Errorf("Unable to parse request body: %v", err) + } + return details, nil +} + +// GetChart retrieves and loads a Chart from a registry +func GetChart(details *Details, kubeClient kubernetes.Interface, netClient HTTPClient, load LoadChart) (*chart.Chart, error) { + repoURL := details.RepoURL + if repoURL == "" { + // FIXME: Make configurable + repoURL = defaultRepoURL + } + repoURL = strings.TrimSuffix(strings.TrimSpace(repoURL), "/") + "/index.yaml" + + authHeader := "" + if details.Auth.Header != nil { + namespace := os.Getenv("POD_NAMESPACE") + if namespace == "" { + namespace = defaultNamespace + } + + secret, err := kubeClient.Core().Secrets(namespace).Get(details.Auth.Header.SecretKeyRef.Name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + authHeader = string(secret.Data[details.Auth.Header.SecretKeyRef.Key]) + } + + log.Printf("Downloading repo %s index...", repoURL) + repoIndex, err := fetchRepoIndex(&netClient, repoURL, authHeader) + if err != nil { + return nil, err + } + + chartURL, err := findChartInRepoIndex(repoIndex, repoURL, details.ChartName, details.Version) + if err != nil { + return nil, err + } + + log.Printf("Downloading %s ...", chartURL) + chartRequested, err := fetchChart(&netClient, chartURL, authHeader, load) + if err != nil { + return nil, err + } + return chartRequested, nil +} diff --git a/pkg/chart/chart_test.go b/pkg/chart/chart_test.go new file mode 100644 index 00000000000..45bae54e836 --- /dev/null +++ b/pkg/chart/chart_test.go @@ -0,0 +1,207 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
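To see the kind of document `fetchRepoIndex` downloads and `findChartInRepoIndex` searches, the default repository index (the `defaultRepoURL` defined above) can be fetched directly; it is a standard Helm `index.yaml` with an `entries` map of chart names to versions and download URLs:

```
# Just a peek at the index structure; any Helm chart repository exposes the same file
curl -s https://kubernetes-charts.storage.googleapis.com/index.yaml | head -n 30
```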
+*/ +package chart + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "reflect" + "testing" + "time" + + "github.com/arschles/assert" + corev1 "k8s.io/api/core/v1" + "k8s.io/client-go/kubernetes/fake" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/repo" +) + +func Test_resolveChartURL(t *testing.T) { + tests := []struct { + name string + baseURL string + chartURL string + wantedURL string + }{ + { + "absolute url", + "http://www.google.com", + "http://charts.example.com/repo/wordpress-0.1.0.tgz", + "http://charts.example.com/repo/wordpress-0.1.0.tgz", + }, + { + "relative, repo url", + "http://charts.example.com/repo/", + "wordpress-0.1.0.tgz", + "http://charts.example.com/repo/wordpress-0.1.0.tgz", + }, + { + "relative, repo index url", + "http://charts.example.com/repo/index.yaml", + "wordpress-0.1.0.tgz", + "http://charts.example.com/repo/wordpress-0.1.0.tgz", + }, + { + "relative, repo url - no trailing slash", + "http://charts.example.com/repo", + "wordpress-0.1.0.tgz", + "http://charts.example.com/wordpress-0.1.0.tgz", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + chartURL, err := resolveChartURL(tt.baseURL, tt.chartURL) + assert.NoErr(t, err) + assert.Equal(t, chartURL, tt.wantedURL, "url") + }) + } +} + +func TestFindChartInRepoIndex(t *testing.T) { + name := "foo" + version := "v1.0.0" + chartURL := "wordpress-0.1.0.tgz" + repoURL := "http://charts.example.com/repo/" + expectedURL := fmt.Sprintf("%s%s", repoURL, chartURL) + + chartMeta := chart.Metadata{Name: name, Version: version} + chartVersion := repo.ChartVersion{URLs: []string{chartURL}} + chartVersion.Metadata = &chartMeta + chartVersions := []*repo.ChartVersion{&chartVersion} + entries := map[string]repo.ChartVersions{} + entries[name] = chartVersions + index := &repo.IndexFile{APIVersion: "v1", Generated: time.Now(), Entries: entries} + + res, err := findChartInRepoIndex(index, repoURL, name, version) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if res != expectedURL { + t.Errorf("Expecting %s to be resolved as %s", res, expectedURL) + } +} + +func TestParseDetails(t *testing.T) { + data := `{ + "repoUrl": "foo.com", + "chartName": "test", + "releaseName": "foo", + "version": "1.0.0", + "values": "foo: bar", + "auth": { + "header": { + "secretKeyRef": { + "key": "bar" + } + } + } + }` + expectedDetails := Details{ + RepoURL: "foo.com", + ChartName: "test", + ReleaseName: "foo", + Version: "1.0.0", + Values: "foo: bar", + Auth: Auth{ + Header: &AuthHeader{ + SecretKeyRef: corev1.SecretKeySelector{ + Key: "bar", + }, + }, + }, + } + details, err := ParseDetails([]byte(data)) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if !reflect.DeepEqual(expectedDetails, *details) { + t.Errorf("%v != %v", expectedDetails, *details) + } +} + +// Fake server for repositories and charts +type fakeHTTPClient struct { + repoURLs []string + chartURLs []string + index *repo.IndexFile +} + +func (f *fakeHTTPClient) Do(h *http.Request) (*http.Response, error) { + for _, repoURL := range f.repoURLs { + if h.URL.String() == fmt.Sprintf("%sindex.yaml", repoURL) { + // Return fake chart index (not customizable per repo) + body, err := json.Marshal(*f.index) + if err != nil { + fmt.Printf("Error! 
%v", err) + } + return &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader(body))}, nil + } + } + for _, chartURL := range f.chartURLs { + if h.URL.String() == chartURL { + // Simulate download time + time.Sleep(100 * time.Millisecond) + // Fake chart response + return &http.Response{StatusCode: 200, Body: ioutil.NopCloser(bytes.NewReader([]byte{}))}, nil + } + } + // Unexpected path + return &http.Response{StatusCode: 404}, fmt.Errorf("Unexpected path") +} + +func fakeLoadChart(in io.Reader) (*chart.Chart, error) { + return &chart.Chart{}, nil +} + +func newHTTPClient(charts []Details) fakeHTTPClient { + var repoURLs []string + var chartURLs []string + entries := map[string]repo.ChartVersions{} + // Populate Chart registry with content of the given helmReleases + for _, ch := range charts { + repoURLs = append(repoURLs, ch.RepoURL) + chartMeta := chart.Metadata{Name: ch.ChartName, Version: ch.Version} + chartURL := fmt.Sprintf("%s%s-%s.tgz", ch.RepoURL, ch.ChartName, ch.Version) + chartURLs = append(chartURLs, chartURL) + chartVersion := repo.ChartVersion{Metadata: &chartMeta, URLs: []string{chartURL}} + chartVersions := []*repo.ChartVersion{&chartVersion} + entries[ch.ChartName] = chartVersions + } + index := &repo.IndexFile{APIVersion: "v1", Generated: time.Now(), Entries: entries} + return fakeHTTPClient{repoURLs, chartURLs, index} +} + +func TestGetChart(t *testing.T) { + target := Details{ + RepoURL: "http://foo.com/", + ChartName: "test", + ReleaseName: "foo", + Version: "1.0.0", + } + httpClient := newHTTPClient([]Details{target}) + kubeClient := fake.NewSimpleClientset() + ch, err := GetChart(&target, kubeClient, &httpClient, fakeLoadChart) + if err != nil { + t.Errorf("Unexpected error: %v", err) + } + if ch == nil { + t.Errorf("It should return a Chart") + } +} diff --git a/pkg/proxy/proxy.go b/pkg/proxy/proxy.go new file mode 100644 index 00000000000..07e72ca5f0b --- /dev/null +++ b/pkg/proxy/proxy.go @@ -0,0 +1,220 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package proxy + +import ( + "fmt" + "strings" + "sync" + + log "github.com/sirupsen/logrus" + "google.golang.org/grpc" + "k8s.io/client-go/kubernetes" + "k8s.io/helm/pkg/helm" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/proto/hapi/release" +) + +const ( + defaultTimeoutSeconds = 180 +) + +var ( + appMutex map[string]*sync.Mutex + releaseStatuses []release.Status_Code +) + +func init() { + appMutex = make(map[string]*sync.Mutex) + releaseStatuses = []release.Status_Code{ + release.Status_UNKNOWN, + release.Status_DEPLOYED, + release.Status_DELETED, + release.Status_DELETING, + release.Status_FAILED, + release.Status_PENDING_INSTALL, + release.Status_PENDING_UPGRADE, + release.Status_PENDING_ROLLBACK, + } +} + +// Proxy contains all the elements to contact Tiller and the K8s API +type Proxy struct { + kubeClient kubernetes.Interface + helmClient helm.Interface +} + +func isNotFound(err error) bool { + // Ideally this would be `grpc.Code(err) == codes.NotFound`, + // but it seems helm doesn't return grpc codes + return strings.Contains(grpc.ErrorDesc(err), "not found") +} + +// NewProxy creates a Proxy +func NewProxy(kubeClient kubernetes.Interface, helmClient helm.Interface) *Proxy { + return &Proxy{ + kubeClient: kubeClient, + helmClient: helmClient, + } +} + +// AppOverview represents the basics of a release +type AppOverview struct { + ReleaseName string `json:"releaseName"` + Version string `json:"version"` + Namespace string `json:"namespace"` +} + +func (p *Proxy) get(name, namespace string) (*release.Release, error) { + list, err := p.helmClient.ListReleases(helm.ReleaseListStatuses(releaseStatuses)) + if err != nil { + return nil, fmt.Errorf("Unable to list helm releases: %v", err) + } + var rel *release.Release + for _, r := range list.Releases { + if (namespace == "" || namespace == r.Namespace) && r.Name == name { + rel = r + break + } + } + if rel == nil { + return nil, fmt.Errorf("Release %s not found in namespace %s", name, namespace) + } + return rel, nil +} + +// GetReleaseStatus prints the status of the given release if exists +func (p *Proxy) GetReleaseStatus(relName string) (release.Status_Code, error) { + status, err := p.helmClient.ReleaseStatus(relName) + if err == nil { + if status.Info != nil && status.Info.Status != nil { + return status.Info.Status.Code, nil + } + } + return release.Status_Code(0), fmt.Errorf("Unable to fetch release status for %s: %v", relName, err) +} + +// ResolveManifest returns a manifest given the chart parameters +func (p *Proxy) ResolveManifest(namespace, values string, ch *chart.Chart) (string, error) { + // We use the release returned after running a dry-run to know the elements to install + resDry, err := p.helmClient.InstallReleaseFromChart( + ch, + namespace, + helm.ValueOverrides([]byte(values)), + helm.ReleaseName(""), + helm.InstallDryRun(true), + ) + if err != nil { + return "", err + } + // The manifest returned has some extra new lines at the beginning + return strings.TrimLeft(resDry.Release.Manifest, "\n"), nil +} + +// ListReleases list releases in a specific namespace if given +func (p *Proxy) ListReleases(namespace string) ([]AppOverview, error) { + list, err := p.helmClient.ListReleases( + helm.ReleaseListStatuses(releaseStatuses), + ) + if err != nil { + return []AppOverview{}, fmt.Errorf("Unable to list helm releases: %v", err) + } + appList := []AppOverview{} + if list != nil { + for _, r := range list.Releases { + if namespace == "" || namespace == r.Namespace { + appList = append(appList, 
AppOverview{r.Name, r.Chart.Metadata.Version, r.Namespace}) + } + } + } + return appList, nil +} + +func lock(name string) { + if appMutex[name] == nil { + appMutex[name] = &sync.Mutex{} + } + appMutex[name].Lock() +} + +func unlock(name string) { + appMutex[name].Unlock() +} + +// CreateRelease creates a tiller release +func (p *Proxy) CreateRelease(name, namespace, values string, ch *chart.Chart) (*release.Release, error) { + lock(name) + defer unlock(name) + log.Printf("Installing release %s into namespace %s", name, namespace) + res, err := p.helmClient.InstallReleaseFromChart( + ch, + namespace, + helm.ValueOverrides([]byte(values)), + helm.ReleaseName(name), + ) + if err != nil { + return nil, fmt.Errorf("Unable create release: %v", err) + } + return res.GetRelease(), nil +} + +// UpdateRelease upgrades a tiller release +func (p *Proxy) UpdateRelease(name, namespace string, values string, ch *chart.Chart) (*release.Release, error) { + lock(name) + defer unlock(name) + // Check if the release already exists + _, err := p.get(name, namespace) + if err != nil && isNotFound(err) { + return nil, fmt.Errorf("Release %s not found in the namespace %s. Unable to update it", name, namespace) + } else if err != nil { + return nil, err + } + log.Printf("Updating release %s", name) + res, err := p.helmClient.UpdateReleaseFromChart( + name, + ch, + helm.UpdateValueOverrides([]byte(values)), + //helm.UpgradeForce(true), ? + ) + if err != nil { + return nil, fmt.Errorf("Unable to update release: %v", err) + } + return res.GetRelease(), nil +} + +// GetRelease returns the info of a release +func (p *Proxy) GetRelease(name, namespace string) (*release.Release, error) { + lock(name) + defer unlock(name) + return p.get(name, namespace) +} + +// DeleteRelease deletes a release +func (p *Proxy) DeleteRelease(name, namespace string) error { + lock(name) + defer unlock(name) + // Validate that the release actually belongs to the namespace + _, err := p.get(name, namespace) + if err != nil { + return err + } + _, err = p.helmClient.DeleteRelease(name, helm.DeletePurge(true)) + if err != nil { + return fmt.Errorf("Unable to delete release: %v", err) + } + return nil +} diff --git a/pkg/proxy/proxy_test.go b/pkg/proxy/proxy_test.go new file mode 100644 index 00000000000..51d1917b359 --- /dev/null +++ b/pkg/proxy/proxy_test.go @@ -0,0 +1,322 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package proxy + +import ( + "reflect" + "strings" + "testing" + "time" + + "k8s.io/client-go/kubernetes/fake" + "k8s.io/helm/pkg/helm" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/proto/hapi/release" +) + +func newFakeProxy(existingTillerReleases []AppOverview) *Proxy { + helmClient := helm.FakeClient{} + // Populate Fake helm client with releases + for _, r := range existingTillerReleases { + helmClient.Rels = append(helmClient.Rels, &release.Release{ + Name: r.ReleaseName, + Namespace: r.Namespace, + Chart: &chart.Chart{ + Metadata: &chart.Metadata{ + Version: r.Version, + }, + }, + }) + } + kubeClient := fake.NewSimpleClientset() + return NewProxy(kubeClient, &helmClient) +} + +func TestListAllReleases(t *testing.T) { + app1 := AppOverview{"foo", "1.0.0", "my_ns"} + app2 := AppOverview{"bar", "1.0.0", "other_ns"} + proxy := newFakeProxy([]AppOverview{app1, app2}) + + // Should return all the releases if no namespace is given + releases, err := proxy.ListReleases("") + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + if len(releases) != 2 { + t.Errorf("It should return both releases") + } + if !reflect.DeepEqual([]AppOverview{app1, app2}, releases) { + t.Errorf("Unexpected list of releases %v", releases) + } +} + +func TestListNamespacedRelease(t *testing.T) { + app1 := AppOverview{"foo", "1.0.0", "my_ns"} + app2 := AppOverview{"bar", "1.0.0", "other_ns"} + proxy := newFakeProxy([]AppOverview{app1, app2}) + + // Should return all the releases if no namespace is given + releases, err := proxy.ListReleases(app1.Namespace) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + if len(releases) != 1 { + t.Errorf("It should return both releases") + } + if !reflect.DeepEqual([]AppOverview{app1}, releases) { + t.Errorf("Unexpected list of releases %v", releases) + } +} + +func TestResolveManifest(t *testing.T) { + ns := "myns" + chartName := "bar" + version := "v1.0.0" + ch := &chart.Chart{ + Metadata: &chart.Metadata{Name: chartName, Version: version}, + } + proxy := newFakeProxy([]AppOverview{}) + + manifest, err := proxy.ResolveManifest(ns, "", ch) + if err != nil { + t.Fatalf("Unexpected error %v", err) + } + if !strings.Contains(manifest, "apiVersion") || !strings.Contains(manifest, "kind") { + t.Errorf("%s doesn't contain a manifest", manifest) + } + if strings.HasPrefix(manifest, "\n") { + t.Error("The manifest should not contain new lines at the beginning") + } +} + +func TestCreateHelmRelease(t *testing.T) { + ns := "myns" + rs := "foo" + chartName := "bar" + version := "v1.0.0" + ch := &chart.Chart{ + Metadata: &chart.Metadata{Name: chartName, Version: version}, + } + proxy := newFakeProxy([]AppOverview{}) + + result, err := proxy.CreateRelease(rs, ns, "", ch) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if result.Name != rs { + t.Errorf("Expected release named %s received %s", rs, result.Name) + } + if result.Namespace != ns { + t.Errorf("Expected release in namespace %s received %s", ns, result.Namespace) + } + // We cannot check that the rest of the chart properties as properly set + // because the fake InstallReleaseFromChart ignores the given chart +} + +func TestCreateConflictingHelmRelease(t *testing.T) { + ns := "myns" + rs := "foo" + chartName := "bar" + version := "v1.0.0" + ch := &chart.Chart{ + Metadata: &chart.Metadata{Name: chartName, Version: version}, + } + ns2 := "other_ns" + app := AppOverview{rs, version, ns2} + proxy := newFakeProxy([]AppOverview{app}) + + _, err := proxy.CreateRelease(rs, ns, "", ch) + if err 
== nil { + t.Error("Release should fail, an existing release in a different namespace already exists") + } + if !strings.Contains(err.Error(), "name that is still in use") { + t.Errorf("Unexpected error %v", err) + } +} + +func TestHelmReleaseUpdated(t *testing.T) { + ns := "myns" + rs := "foo" + chartName := "bar" + version := "v1.0.0" + ch := &chart.Chart{ + Metadata: &chart.Metadata{Name: chartName, Version: version}, + } + app := AppOverview{rs, version, ns} + proxy := newFakeProxy([]AppOverview{app}) + + result, err := proxy.UpdateRelease(rs, ns, "", ch) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if result.Name != rs { + t.Errorf("Expected release named %s received %s", rs, result.Name) + } + if result.Namespace != ns { + t.Errorf("Expected release in namespace %s received %s", ns, result.Namespace) + } + rels, err := proxy.helmClient.ListReleases() + if err != nil { + t.Errorf("Unexpected error %v", err) + } + // We cannot test that the release content changes because fake UpdateReleaseResponse + // does not modify the release + if len(rels.Releases) != 1 { + t.Errorf("Unexpected amount of releases %d, it should update the existing one", len(rels.Releases)) + } +} + +func TestUpdateMissingHelmRelease(t *testing.T) { + ns := "myns" + rs := "foo" + chartName := "bar" + version := "v1.0.0" + ch := &chart.Chart{ + Metadata: &chart.Metadata{Name: chartName, Version: version}, + } + // Simulate the same app but in a different namespace + ns2 := "other_ns" + app := AppOverview{rs, version, ns2} + proxy := newFakeProxy([]AppOverview{app}) + + _, err := proxy.UpdateRelease(rs, ns, "", ch) + if err == nil { + t.Error("Update should fail, there is not a release in the namespace specified") + } + if !strings.Contains(err.Error(), "not found") { + t.Errorf("Unexpected error %v", err) + } +} + +func TestGetHelmRelease(t *testing.T) { + app1 := AppOverview{"foo", "1.0.0", "my_ns"} + app2 := AppOverview{"bar", "1.0.0", "other_ns"} + type testStruct struct { + existingApps []AppOverview + shouldFail bool + targetApp string + tartegNamespace string + expectedResult string + } + tests := []testStruct{ + {[]AppOverview{app1, app2}, false, "foo", "my_ns", "foo"}, + {[]AppOverview{app1, app2}, true, "bar", "my_ns", ""}, + {[]AppOverview{app1, app2}, true, "foobar", "my_ns", ""}, + {[]AppOverview{app1, app2}, false, "foo", "", "foo"}, + } + for _, test := range tests { + proxy := newFakeProxy(test.existingApps) + res, err := proxy.GetRelease(test.targetApp, test.tartegNamespace) + if test.shouldFail && err == nil { + t.Errorf("Get %s/%s should fail", test.tartegNamespace, test.targetApp) + } + if !test.shouldFail { + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if res.Name != test.expectedResult { + t.Errorf("Expecting app %s, received %s", test.expectedResult, res.Name) + } + } + } +} + +func TestHelmReleaseDeleted(t *testing.T) { + app := AppOverview{"foo", "1.0.0", "my_ns"} + proxy := newFakeProxy([]AppOverview{app}) + + err := proxy.DeleteRelease(app.ReleaseName, app.Namespace) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + rels, err := proxy.helmClient.ListReleases() + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if len(rels.Releases) != 0 { + t.Errorf("Unexpected amount of releases %d, it should be empty", len(rels.Releases)) + } +} + +func TestDeleteMissingHelmRelease(t *testing.T) { + app := AppOverview{"foo", "1.0.0", "my_ns"} + proxy := newFakeProxy([]AppOverview{app}) + + err := proxy.DeleteRelease(app.ReleaseName, "other_ns") + 
if err == nil { + t.Error("Delete should fail, there is not a release in the namespace specified") + } + rels, err := proxy.helmClient.ListReleases() + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if len(rels.Releases) != 1 { + t.Errorf("Unexpected amount of releases %d, it contain a release", len(rels.Releases)) + } +} + +func TestEnsureThreadSafety(t *testing.T) { + ns := "myns" + rs := "foo" + chartName := "bar" + version := "v1.0.0" + ch := &chart.Chart{ + Metadata: &chart.Metadata{Name: chartName, Version: version}, + } + proxy := newFakeProxy([]AppOverview{}) + finish := make(chan struct{}) + type test func() + phases := []test{ + func() { + // Create first element + result, err := proxy.CreateRelease(rs, ns, "", ch) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if result.Name != rs { + t.Errorf("Expected release named %s received %s", rs, result.Name) + } + }, + func() { + // Try to create it again + _, err := proxy.CreateRelease(rs, ns, "", ch) + if err == nil { + t.Errorf("Should fail with 'already exists'") + } + }, + func() { + _, err := proxy.UpdateRelease(rs, ns, "", ch) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + }, + func() { + err := proxy.DeleteRelease(rs, ns) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + finish <- struct{}{} + }, + } + for _, phase := range phases { + // Run all phases in parallel + go phase() + // Give minimum time for phase to block + time.Sleep(1 * time.Millisecond) + } + <-finish +} diff --git a/pkg/yaml/yaml.go b/pkg/yaml/yaml.go new file mode 100644 index 00000000000..95369c6e3df --- /dev/null +++ b/pkg/yaml/yaml.go @@ -0,0 +1,57 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + "bufio" + "io" + "strings" + + "github.com/ksonnet/kubecfg/utils" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/yaml" +) + +// ParseObjects returns an Unstructured object list based on the content of a YAML manifest +func ParseObjects(manifest string) ([]*unstructured.Unstructured, error) { + r := strings.NewReader(manifest) + decoder := yaml.NewYAMLReader(bufio.NewReader(r)) + ret := []runtime.Object{} + for { + bytes, err := decoder.Read() + if err == io.EOF { + break + } else if err != nil { + return nil, err + } + if len(bytes) == 0 { + continue + } + jsondata, err := yaml.ToJSON(bytes) + if err != nil { + return nil, err + } + obj, _, err := unstructured.UnstructuredJSONScheme.Decode(jsondata, nil, nil) + if err != nil { + return nil, err + } + ret = append(ret, obj) + } + + return utils.FlattenToV1(ret), nil +} diff --git a/pkg/yaml/yaml_test.go b/pkg/yaml/yaml_test.go new file mode 100644 index 00000000000..030df61d993 --- /dev/null +++ b/pkg/yaml/yaml_test.go @@ -0,0 +1,60 @@ +/* +Copyright (c) 2018 Bitnami + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + "testing" +) + +func TestParseObjectsSuccess(t *testing.T) { + m1 := `apiVersion: v1 +kind: Namespace +metadata: + annotations: {} + labels: + name: kubeless + name: kubeless` + rs, err := ParseObjects(m1) + if err != nil { + t.Error(err) + } + if len(rs) != 1 { + t.Errorf("Expected 1 yaml element, got %v", len(rs)) + } + + // validate some fields of the parsed object + if rs[0].GetAPIVersion() != "v1" { + t.Errorf("Expected apiversion=v1, go %s", rs[0].GetAPIVersion()) + } + if rs[0].GetKind() != "Namespace" { + t.Errorf("Expected kind = Namespace, go %s", rs[0].GetKind()) + } +} + +func TestParseObjectFailure(t *testing.T) { + m2 := `apiVersion: v1 +kind: Namespace +metadata: + annotations: {} + labels: + name: kubeless + name: kubeless` + _, err := ParseObjects(m2) + if err == nil { + t.Error("Expected parse fail, got success") + } +} diff --git a/vendor/cloud.google.com/go/CONTRIBUTORS b/vendor/cloud.google.com/go/CONTRIBUTORS index eb55275bf0d..3b3cbed98e9 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTORS +++ b/vendor/cloud.google.com/go/CONTRIBUTORS @@ -22,6 +22,7 @@ David Symonds Filippo Valsorda Glenn Lewis Ingo Oeser +James Hall Johan Euphrosine Jonathan Amsterdam Kunpei Sakai diff --git a/vendor/github.com/Masterminds/semver/.travis.yml b/vendor/github.com/Masterminds/semver/.travis.yml index 09ccf0e44b9..3d9ebadb933 100644 --- a/vendor/github.com/Masterminds/semver/.travis.yml +++ b/vendor/github.com/Masterminds/semver/.travis.yml @@ -1,9 +1,11 @@ language: go go: - - 1.6 - - 1.7 - - 1.8 + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x - tip # Setting sudo access to false will let Travis CI use containers rather than @@ -13,8 +15,8 @@ go: sudo: false script: - - GO15VENDOREXPERIMENT=1 make setup - - GO15VENDOREXPERIMENT=1 make test + - make setup + - make test notifications: webhooks: diff --git a/vendor/github.com/Masterminds/semver/CHANGELOG.md b/vendor/github.com/Masterminds/semver/CHANGELOG.md index ba84c546759..b888e20abaa 100644 --- a/vendor/github.com/Masterminds/semver/CHANGELOG.md +++ b/vendor/github.com/Masterminds/semver/CHANGELOG.md @@ -1,3 +1,17 @@ +# 1.4.2 (2018-04-10) + +## Changed +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +## Fixed +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +# 1.4.1 (2018-04-02) + +## Fixed +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + # 1.4.0 (2017-10-04) ## Changed diff --git a/vendor/github.com/Masterminds/semver/version.go b/vendor/github.com/Masterminds/semver/version.go index b29c593f239..9d22ea6308d 100644 --- a/vendor/github.com/Masterminds/semver/version.go +++ b/vendor/github.com/Masterminds/semver/version.go @@ -379,16 +379,15 @@ func comparePrePart(s, o string) int { // When s or o are empty we can use the other in an attempt to determine // the response. 
- if o == "" { - _, n := strconv.ParseInt(s, 10, 64) - if n != nil { + if s == "" { + if o != "" { return -1 } return 1 } - if s == "" { - _, n := strconv.ParseInt(o, 10, 64) - if n != nil { + + if o == "" { + if s != "" { return 1 } return -1 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go index f4cabd66956..d7d14f8eb63 100644 --- a/vendor/github.com/beorn7/perks/quantile/stream.go +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -77,15 +77,20 @@ func NewHighBiased(epsilon float64) *Stream { // is guaranteed to be within (Quantile±Epsilon). // // See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targets map[float64]float64) *Stream { +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + ƒ := func(s *stream, r float64) float64 { var m = math.MaxFloat64 var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) } if f < m { m = f @@ -96,6 +101,25 @@ func NewTargeted(targets map[float64]float64) *Stream { return newStream(ƒ) } +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + // Stream computes quantiles for a stream of float64s. It is not thread-safe by // design. Take care when using across multiple goroutines. type Stream struct { diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md index 25aec486c63..d358d881b8d 100644 --- a/vendor/github.com/dgrijalva/jwt-go/README.md +++ b/vendor/github.com/dgrijalva/jwt-go/README.md @@ -1,11 +1,15 @@ -A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) +# jwt-go [![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) -**BREAKING CHANGES:*** Version 3.0.0 is here. It includes _a lot_ of changes including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 
4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. -**NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. +**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. ## What the heck is a JWT? @@ -37,7 +41,7 @@ Here's an example of an extension that integrates with the Google App Engine sig ## Compliance -This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: +This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: * In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. @@ -47,7 +51,10 @@ This library is considered production ready. Feedback and feature requests are This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). -While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v2`. It will do the right thing WRT semantic versioning. +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:*** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. ## Usage Tips @@ -68,6 +75,14 @@ Symmetric signing methods, such as HSA, use only a single secret. This is probab Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. 
This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + ### JWT and OAuth It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. @@ -77,7 +92,7 @@ Without going too far down the rabbit hole, here's a description of the interact * OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. * OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. * Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. - + ## More Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md index c21551f6bbd..6370298313a 100644 --- a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -1,5 +1,12 @@ ## `jwt-go` Version History +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. 
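As a hedged illustration of the `ParseUnverified` method mentioned in the 3.2.0 notes above (its signature is added to `parser.go` further down in this diff), a caller might use it to inspect a token before any validation; the token string and claims type here are placeholders, not part of the change itself:

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	tokenString := "<some.jwt.token>" // placeholder; any compact JWT string

	// ParseUnverified decodes the header and claims but does NOT check the
	// signature, as its doc comment in parser.go warns.
	token, _, err := new(jwt.Parser).ParseUnverified(tokenString, jwt.MapClaims{})
	if err != nil {
		fmt.Println("malformed token:", err)
		return
	}
	fmt.Println("alg header (still unverified):", token.Header["alg"])
}
```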
+ #### 3.1.0 * Improvements to `jwt` command line tool diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go index 2f59a222363..f977381240e 100644 --- a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -14,6 +14,7 @@ var ( ) // Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification type SigningMethodECDSA struct { Name string Hash crypto.Hash diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go index c2299192545..addbe5d4018 100644 --- a/vendor/github.com/dgrijalva/jwt-go/hmac.go +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -7,6 +7,7 @@ import ( ) // Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation type SigningMethodHMAC struct { Name string Hash crypto.Hash @@ -90,5 +91,5 @@ func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, return EncodeSegment(hasher.Sum(nil)), nil } - return "", ErrInvalidKey + return "", ErrInvalidKeyType } diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go index 7bf1c4ea084..d6901d9adb5 100644 --- a/vendor/github.com/dgrijalva/jwt-go/parser.go +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -21,55 +21,9 @@ func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { } func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { - parts := strings.Split(tokenString, ".") - if len(parts) != 3 { - return nil, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) - } - - var err error - token := &Token{Raw: tokenString} - - // parse Header - var headerBytes []byte - if headerBytes, err = DecodeSegment(parts[0]); err != nil { - if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { - return token, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) - } - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - if err = json.Unmarshal(headerBytes, &token.Header); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // parse Claims - var claimBytes []byte - token.Claims = claims - - if claimBytes, err = DecodeSegment(parts[1]); err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) - if p.UseJSONNumber { - dec.UseNumber() - } - // JSON Decode. 
Special case for map type to avoid weird pointer behavior - if c, ok := token.Claims.(MapClaims); ok { - err = dec.Decode(&c) - } else { - err = dec.Decode(&claims) - } - // Handle decode error + token, parts, err := p.ParseUnverified(tokenString, claims) if err != nil { - return token, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} - } - - // Lookup signature method - if method, ok := token.Header["alg"].(string); ok { - if token.Method = GetSigningMethod(method); token.Method == nil { - return token, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) - } - } else { - return token, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + return token, err } // Verify signing method is in the required set @@ -96,6 +50,9 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf } if key, err = keyFunc(token); err != nil { // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} } @@ -129,3 +86,63 @@ func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyf return token, vErr } + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go index 0ae0b1984e5..e4caf1ca4a1 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa.go +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -7,6 +7,7 @@ import ( ) // Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation type SigningMethodRSA struct { Name string Hash crypto.Hash @@ -44,7 +45,7 @@ func (m *SigningMethodRSA) Alg() string { } // Implements the Verify method from SigningMethod -// For this signing method, must be an rsa.PublicKey structure. +// For this signing method, must be an *rsa.PublicKey structure. func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { var err error @@ -73,7 +74,7 @@ func (m *SigningMethodRSA) Verify(signingString, signature string, key interface } // Implements the Sign method from SigningMethod -// For this signing method, must be an rsa.PrivateKey structure. +// For this signing method, must be an *rsa.PrivateKey structure. 
func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { var rsaKey *rsa.PrivateKey var ok bool diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go index 213a90dbbf8..a5ababf956c 100644 --- a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -39,6 +39,38 @@ func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { return pkey, nil } +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + // Parse PEM encoded PKCS1 or PKCS8 public key func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { var err error diff --git a/vendor/github.com/disintegration/imaging/.travis.yml b/vendor/github.com/disintegration/imaging/.travis.yml index 24dd1e4640b..89370edcbad 100644 --- a/vendor/github.com/disintegration/imaging/.travis.yml +++ b/vendor/github.com/disintegration/imaging/.travis.yml @@ -1,12 +1,9 @@ language: go - -sudo: false - go: - - "1.7" - - "1.8" - - "1.9" - - "1.10" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" before_install: - go get github.com/mattn/goveralls diff --git a/vendor/github.com/disintegration/imaging/README.md b/vendor/github.com/disintegration/imaging/README.md index 34c31c00253..c7ee30fc85d 100644 --- a/vendor/github.com/disintegration/imaging/README.md +++ b/vendor/github.com/disintegration/imaging/README.md @@ -2,9 +2,10 @@ [![GoDoc](https://godoc.org/github.com/disintegration/imaging?status.svg)](https://godoc.org/github.com/disintegration/imaging) [![Build Status](https://travis-ci.org/disintegration/imaging.svg?branch=master)](https://travis-ci.org/disintegration/imaging) -[![Coverage Status](https://coveralls.io/repos/github/disintegration/imaging/badge.svg?branch=master)](https://coveralls.io/github/disintegration/imaging?branch=master) +[![Coverage Status](https://coveralls.io/repos/github/disintegration/imaging/badge.svg?branch=master&service=github)](https://coveralls.io/github/disintegration/imaging?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/disintegration/imaging)](https://goreportcard.com/report/github.com/disintegration/imaging) -Package imaging provides basic imaging processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.). +Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.). All the image processing functions provided by the package accept any image type that implements `image.Image` interface as an input, and return a new image of `*image.NRGBA` type (32bit RGBA colors, not premultiplied by alpha). 
diff --git a/vendor/github.com/disintegration/imaging/adjust.go b/vendor/github.com/disintegration/imaging/adjust.go index 8ebc931f7f3..fb3a9ce3c29 100644 --- a/vendor/github.com/disintegration/imaging/adjust.go +++ b/vendor/github.com/disintegration/imaging/adjust.go @@ -51,7 +51,7 @@ func Invert(img image.Image) *image.NRGBA { // AdjustContrast changes the contrast of the image using the percentage parameter and returns the adjusted image. // The percentage must be in range (-100, 100). The percentage = 0 gives the original image. -// The percentage = -100 gives solid grey image. +// The percentage = -100 gives solid gray image. // // Examples: // diff --git a/vendor/github.com/disintegration/imaging/doc.go b/vendor/github.com/disintegration/imaging/doc.go index dad4b9c8954..5d59b46e249 100644 --- a/vendor/github.com/disintegration/imaging/doc.go +++ b/vendor/github.com/disintegration/imaging/doc.go @@ -1,5 +1,5 @@ /* -Package imaging provides basic imaging processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.). +Package imaging provides basic image processing functions (resize, rotate, crop, brightness/contrast adjustments, etc.). All the image processing functions provided by the package accept any image type that implements image.Image interface as an input, and return a new image of *image.NRGBA type (32bit RGBA colors, not premultiplied by alpha). diff --git a/vendor/github.com/disintegration/imaging/helpers.go b/vendor/github.com/disintegration/imaging/helpers.go index b54db5f4cde..dcb4d7ebbd2 100644 --- a/vendor/github.com/disintegration/imaging/helpers.go +++ b/vendor/github.com/disintegration/imaging/helpers.go @@ -1,6 +1,7 @@ package imaging import ( + "bytes" "errors" "image" "image/color" @@ -46,6 +47,26 @@ func (f Format) String() string { } } +var formatFromExt = map[string]Format{ + ".jpg": JPEG, + ".jpeg": JPEG, + ".png": PNG, + ".tif": TIFF, + ".tiff": TIFF, + ".bmp": BMP, + ".gif": GIF, +} + +// FormatFromFilename parses image format from filename extension: +// "jpg" (or "jpeg"), "png", "gif", "tif" (or "tiff") and "bmp" are supported. +func FormatFromFilename(filename string) (Format, error) { + ext := strings.ToLower(filepath.Ext(filename)) + if f, ok := formatFromExt[ext]; ok { + return f, nil + } + return -1, ErrUnsupportedFormat +} + var ( // ErrUnsupportedFormat means the given image format (or file extension) is unsupported. ErrUnsupportedFormat = errors.New("imaging: unsupported image format") @@ -80,17 +101,19 @@ func Open(filename string) (image.Image, error) { } type encodeConfig struct { - jpegQuality int - gifNumColors int - gifQuantizer draw.Quantizer - gifDrawer draw.Drawer + jpegQuality int + gifNumColors int + gifQuantizer draw.Quantizer + gifDrawer draw.Drawer + pngCompressionLevel png.CompressionLevel } var defaultEncodeConfig = encodeConfig{ - jpegQuality: 95, - gifNumColors: 256, - gifQuantizer: nil, - gifDrawer: nil, + jpegQuality: 95, + gifNumColors: 256, + gifQuantizer: nil, + gifDrawer: nil, + pngCompressionLevel: png.DefaultCompression, } // EncodeOption sets an optional parameter for the Encode and Save functions. @@ -128,6 +151,14 @@ func GIFDrawer(drawer draw.Drawer) EncodeOption { } } +// PNGCompressionLevel returns an EncodeOption that sets the compression level +// of the PNG-encoded image. Default is png.DefaultCompression. 
+func PNGCompressionLevel(level png.CompressionLevel) EncodeOption { + return func(c *encodeConfig) { + c.pngCompressionLevel = level + } +} + // Encode writes the image img to w in the specified format (JPEG, PNG, GIF, TIFF or BMP). func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) error { cfg := defaultEncodeConfig @@ -155,17 +186,22 @@ func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) e } case PNG: - err = png.Encode(w, img) + enc := png.Encoder{CompressionLevel: cfg.pngCompressionLevel} + err = enc.Encode(w, img) + case GIF: err = gif.Encode(w, img, &gif.Options{ NumColors: cfg.gifNumColors, Quantizer: cfg.gifQuantizer, Drawer: cfg.gifDrawer, }) + case TIFF: err = tiff.Encode(w, img, &tiff.Options{Compression: tiff.Deflate, Predictor: true}) + case BMP: err = bmp.Encode(w, img) + default: err = ErrUnsupportedFormat } @@ -184,22 +220,10 @@ func Encode(w io.Writer, img image.Image, format Format, opts ...EncodeOption) e // err := imaging.Save(img, "out.jpg", imaging.JPEGQuality(80)) // func Save(img image.Image, filename string, opts ...EncodeOption) (err error) { - formats := map[string]Format{ - ".jpg": JPEG, - ".jpeg": JPEG, - ".png": PNG, - ".tif": TIFF, - ".tiff": TIFF, - ".bmp": BMP, - ".gif": GIF, - } - - ext := strings.ToLower(filepath.Ext(filename)) - f, ok := formats[ext] - if !ok { - return ErrUnsupportedFormat + f, err := FormatFromFilename(filename) + if err != nil { + return err } - file, err := fs.Create(filename) if err != nil { return err @@ -221,33 +245,16 @@ func New(width, height int, fillColor color.Color) *image.NRGBA { return &image.NRGBA{} } - dst := image.NewNRGBA(image.Rect(0, 0, width, height)) c := color.NRGBAModel.Convert(fillColor).(color.NRGBA) - - if c.R == 0 && c.G == 0 && c.B == 0 && c.A == 0 { - return dst + if (c == color.NRGBA{0, 0, 0, 0}) { + return image.NewNRGBA(image.Rect(0, 0, width, height)) } - // Fill the first row. - i := 0 - for x := 0; x < width; x++ { - dst.Pix[i+0] = c.R - dst.Pix[i+1] = c.G - dst.Pix[i+2] = c.B - dst.Pix[i+3] = c.A - i += 4 + return &image.NRGBA{ + Pix: bytes.Repeat([]byte{c.R, c.G, c.B, c.A}, width*height), + Stride: 4 * width, + Rect: image.Rect(0, 0, width, height), } - - // Copy the first row to other rows. - size := width * 4 - parallel(1, height, func(ys <-chan int) { - for y := range ys { - i = y * dst.Stride - copy(dst.Pix[i:i+size], dst.Pix[0:size]) - } - }) - - return dst } // Clone returns a copy of the given image. 
diff --git a/vendor/github.com/disintegration/imaging/resize.go b/vendor/github.com/disintegration/imaging/resize.go index 2ed65a2139b..97f498a0a5b 100644 --- a/vendor/github.com/disintegration/imaging/resize.go +++ b/vendor/github.com/disintegration/imaging/resize.go @@ -178,18 +178,15 @@ func resizeNearest(img image.Image, width, height int) *image.NRGBA { dx := float64(img.Bounds().Dx()) / float64(width) dy := float64(img.Bounds().Dy()) / float64(height) - if height < img.Bounds().Dy() { + if dx > 1 && dy > 1 { src := newScanner(img) parallel(0, height, func(ys <-chan int) { - scanLine := make([]uint8, src.w*4) for y := range ys { srcY := int((float64(y) + 0.5) * dy) - src.scan(0, srcY, src.w, srcY+1, scanLine) dstOff := y * dst.Stride for x := 0; x < width; x++ { srcX := int((float64(x) + 0.5) * dx) - srcOff := srcX * 4 - copy(dst.Pix[dstOff:dstOff+4], scanLine[srcOff:srcOff+4]) + src.scan(srcX, srcY, srcX+1, srcY+1, dst.Pix[dstOff:dstOff+4]) dstOff += 4 } } diff --git a/vendor/github.com/emicklei/go-restful/CHANGES.md b/vendor/github.com/emicklei/go-restful/CHANGES.md index d23b5755538..7294637cd93 100644 --- a/vendor/github.com/emicklei/go-restful/CHANGES.md +++ b/vendor/github.com/emicklei/go-restful/CHANGES.md @@ -1,5 +1,8 @@ Change history of go-restful = +v2.6.1 +- add JSONNewDecoderFunc to allow custom JSON Decoder usage (go 1.10+) + v2.6.0 - Make JSR 311 routing and path param processing consistent - Adding description to RouteBuilder.Reads() diff --git a/vendor/github.com/emicklei/go-restful/README.md b/vendor/github.com/emicklei/go-restful/README.md index 002a08d9656..65c3d4f9d04 100644 --- a/vendor/github.com/emicklei/go-restful/README.md +++ b/vendor/github.com/emicklei/go-restful/README.md @@ -61,8 +61,20 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo - Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...) - Configurable (trace) logging - Customizable gzip/deflate readers and writers using CompressorProvider registration - -### Resources + +## How to customize +There are several hooks to customize the behavior of the go-restful package. + +- Router algorithm +- Panic recovery +- JSON decoder +- Trace logging +- Compression +- Encoders for other serializers + +TODO: write examples of these. + +## Resources - [Example posted on blog](http://ernestmicklei.com/2012/11/go-restful-first-working-example/) - [Design explained on blog](http://ernestmicklei.com/2012/11/go-restful-api-design/) @@ -72,4 +84,4 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo Type ```git shortlog -s``` for a full list of contributors. -© 2012 - 2017, http://ernestmicklei.com. MIT License. Contributions are welcome. +© 2012 - 2018, http://ernestmicklei.com. MIT License. Contributions are welcome. diff --git a/vendor/github.com/emicklei/go-restful/entity_accessors.go b/vendor/github.com/emicklei/go-restful/entity_accessors.go index 6ecf6c7f897..42957055ff8 100644 --- a/vendor/github.com/emicklei/go-restful/entity_accessors.go +++ b/vendor/github.com/emicklei/go-restful/entity_accessors.go @@ -7,6 +7,7 @@ package restful import ( "encoding/json" "encoding/xml" + "io" "strings" "sync" ) @@ -126,11 +127,16 @@ type entityJSONAccess struct { ContentType string } +// JSONNewDecoderFunc can be used to inject a different configration for the json Decoder instance. 
+var JSONNewDecoderFunc = func(r io.Reader) *json.Decoder { + decoder := json.NewDecoder(r) + decoder.UseNumber() + return decoder +} + // Read unmarshalls the value from JSON func (e entityJSONAccess) Read(req *Request, v interface{}) error { - decoder := json.NewDecoder(req.Request.Body) - decoder.UseNumber() - return decoder.Decode(v) + return JSONNewDecoderFunc(req.Request.Body).Decode(v) } // Write marshalls the value to JSON and set the Content-Type Header. diff --git a/vendor/github.com/emicklei/go-restful/go.mod b/vendor/github.com/emicklei/go-restful/go.mod new file mode 100644 index 00000000000..5fa37e774dd --- /dev/null +++ b/vendor/github.com/emicklei/go-restful/go.mod @@ -0,0 +1 @@ +module github.com/emicklei/go-restful/v2 diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 12411127b39..00000000000 --- a/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,6 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini -.idea -/.vscode diff --git a/vendor/github.com/go-ini/ini/.travis.yml b/vendor/github.com/go-ini/ini/.travis.yml deleted file mode 100644 index 75fe7b74b94..00000000000 --- a/vendor/github.com/go-ini/ini/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -sudo: false -language: go -go: - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - -script: - - go get golang.org/x/tools/cmd/cover - - go get github.com/smartystreets/goconvey - - mkdir -p $HOME/gopath/src/gopkg.in - - ln -s $HOME/gopath/src/github.com/go-ini/ini $HOME/gopath/src/gopkg.in/ini.v1 - - go test -v -cover -race diff --git a/vendor/github.com/go-ini/ini/LICENSE b/vendor/github.com/go-ini/ini/LICENSE deleted file mode 100644 index d361bbcdf5c..00000000000 --- a/vendor/github.com/go-ini/ini/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). 
- -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. 
- -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. 
- -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright 2014 Unknwon - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/go-ini/ini/Makefile b/vendor/github.com/go-ini/ini/Makefile deleted file mode 100644 index af27ff0768f..00000000000 --- a/vendor/github.com/go-ini/ini/Makefile +++ /dev/null @@ -1,15 +0,0 @@ -.PHONY: build test bench vet coverage - -build: vet bench - -test: - go test -v -cover -race - -bench: - go test -v -cover -race -test.bench=. 
-test.benchmem - -vet: - go vet - -coverage: - go test -coverprofile=c.out && go tool cover -html=c.out && rm c.out diff --git a/vendor/github.com/go-ini/ini/README.md b/vendor/github.com/go-ini/ini/README.md deleted file mode 100644 index 6b7b73fed11..00000000000 --- a/vendor/github.com/go-ini/ini/README.md +++ /dev/null @@ -1,790 +0,0 @@ -INI [![Build Status](https://travis-ci.org/go-ini/ini.svg?branch=master)](https://travis-ci.org/go-ini/ini) [![Sourcegraph](https://sourcegraph.com/github.com/go-ini/ini/-/badge.svg)](https://sourcegraph.com/github.com/go-ini/ini?badge) -=== - -![](https://avatars0.githubusercontent.com/u/10216035?v=3&s=200) - -Package ini provides INI file read and write functionality in Go. - -[简体中文](README_ZH.md) - -## Feature - -- Load multiple data sources(`[]byte`, file and `io.ReadCloser`) with overwrites. -- Read with recursion values. -- Read with parent-child sections. -- Read with auto-increment key names. -- Read with multiple-line values. -- Read with tons of helper methods. -- Read and convert values to Go types. -- Read and **WRITE** comments of sections and keys. -- Manipulate sections, keys and comments with ease. -- Keep sections and keys in order as you parse and save. - -## Installation - -To use a tagged revision: - - go get gopkg.in/ini.v1 - -To use with latest changes: - - go get github.com/go-ini/ini - -Please add `-u` flag to update in the future. - -### Testing - -If you want to test on your machine, please apply `-t` flag: - - go get -t gopkg.in/ini.v1 - -Please add `-u` flag to update in the future. - -## Getting Started - -### Loading from data sources - -A **Data Source** is either raw data in type `[]byte`, a file name with type `string` or `io.ReadCloser`. You can load **as many data sources as you want**. Passing other types will simply return an error. - -```go -cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) -``` - -Or start with an empty object: - -```go -cfg := ini.Empty() -``` - -When you cannot decide how many data sources to load at the beginning, you will still be able to **Append()** them later. - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -If you have a list of files with possibilities that some of them may not available at the time, and you don't know exactly which ones, you can use `LooseLoad` to ignore nonexistent files without returning error. - -```go -cfg, err := ini.LooseLoad("filename", "filename_404") -``` - -The cool thing is, whenever the file is available to load while you're calling `Reload` method, it will be counted as usual. - -#### Ignore cases of key name - -When you do not care about cases of section and key names, you can use `InsensitiveLoad` to force all names to be lowercased while parsing. - -```go -cfg, err := ini.InsensitiveLoad("filename") -//... - -// sec1 and sec2 are the exactly same section object -sec1, err := cfg.GetSection("Section") -sec2, err := cfg.GetSection("SecTIOn") - -// key1 and key2 are the exactly same key object -key1, err := sec1.GetKey("Key") -key2, err := sec2.GetKey("KeY") -``` - -#### MySQL-like boolean key - -MySQL's configuration allows a key without value as follows: - -```ini -[mysqld] -... -skip-host-cache -skip-name-resolve -``` - -By default, this is considered as missing value. 
But if you know you're going to deal with those cases, you can assign advanced load options:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")
-```
-
-The value of such keys is always `true`, and when you save to a file, they are written back in the same format as they were read.
-
-To generate such keys in your program, you could use `NewBooleanKey`:
-
-```go
-key, err := sec.NewBooleanKey("skip-host-cache")
-```
-
-#### Comment
-
-Take care that the following formats will be treated as comments:
-
-1. Lines beginning with `#` or `;`
-2. Words after `#` or `;`
-3. Words after a section name (i.e. words after `[some section name]`)
-
-If you want to save a value that contains `#` or `;`, please quote it with ``` ` ``` or ``` """ ```.
-
-Alternatively, you can use the following `LoadOptions` to completely ignore inline comments:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")
-```
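-
-For instance, here is a minimal sketch of the quoting rules above (the key names and values are invented for illustration):
-
-```go
-package main
-
-import (
-    "fmt"
-
-    "gopkg.in/ini.v1"
-)
-
-func main() {
-    // The backtick-quoted value keeps its "#" as part of the value; the
-    // unquoted one should lose everything after "#" as an inline comment.
-    raw := []byte("color = `dark #222222`\nplain = dark #222222\n")
-
-    cfg, err := ini.Load(raw)
-    if err != nil {
-        panic(err)
-    }
-
-    fmt.Println(cfg.Section("").Key("color").String()) // dark #222222
-    fmt.Println(cfg.Section("").Key("plain").String()) // dark
-}
-```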
cfg.Section("").Key("BOOL").MustBool() -v = cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = cfg.Section("").Key("UINT64").MustUint64() -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// Methods start with Must also accept one argument for default value -// when key not found or fail to parse value to given type. -// Except method MustString, which you have to pass a default value. - -v = cfg.Section("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -What if my value is three-line long? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -Not a problem! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -That's cool, how about continuation lines? - -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -Piece of cake! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -Well, I hate continuation lines, how do I disable that? - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{ - IgnoreContinuation: true, -}, "filename") -``` - -Holy crap! - -Note that single quotes around values will be stripped: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -Sometimes you downloaded file from [Crowdin](https://crowdin.com/) has values like the following (value is surrounded by double quotes and quotes in the value are escaped): - -```ini -create_repo="created repository %s" -``` - -How do you transform this to regular format automatically? - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini")) -cfg.Section("").Key("create_repo").String() -// You got: created repository %s -``` - -That's all? Hmm, no. - -#### Helper methods of working with values - -To get value with given candidates: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -Default value will be presented if value of key is not in candidates you given, and default value does not need be one of candidates. 
- -To validate value in a given range: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -##### Auto-split values into a slice - -To use zero value of type for invalid inputs: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -To exclude invalid values out of result slice: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [2.2] -vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") -vals = cfg.Section("").Key("INTS").ValidInts(",") -vals = cfg.Section("").Key("INT64S").ValidInt64s(",") -vals = cfg.Section("").Key("UINTS").ValidUints(",") -vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") -vals = cfg.Section("").Key("TIMES").ValidTimes(",") -``` - -Or to return nothing but error when have invalid inputs: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> error -vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") -vals = cfg.Section("").Key("INTS").StrictInts(",") -vals = cfg.Section("").Key("INT64S").StrictInt64s(",") -vals = cfg.Section("").Key("UINTS").StrictUints(",") -vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") -vals = cfg.Section("").Key("TIMES").StrictTimes(",") -``` - -### Save your configuration - -Finally, it's time to save your configuration to somewhere. - -A typical way to save configuration is writing it to a file: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -Another way to save is writing to a `io.Writer` interface: - -```go -// ... -cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -By default, spaces are used to align "=" sign between key and values, to disable that: - -```go -ini.PrettyFormat = false -``` - -## Advanced Usage - -### Recursive Values - -For all value of keys, there is a special syntax `%()s`, where `` is the key name in same section or default section, and `%()s` will be replaced by corresponding value(empty string if key not found). You can use this syntax at most 99 level of recursions. - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -### Parent-child Sections - -You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section. 
-
-## Advanced Usage
-
-### Recursive Values
-
-For all values of keys, there is a special syntax `%(<name>)s`, where `<name>` is the key name in the same section or in the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
-
-```ini
-NAME = ini
-
-[author]
-NAME = Unknwon
-GITHUB = https://github.com/%(NAME)s
-
-[package]
-FULL_NAME = github.com/go-ini/%(NAME)s
-```
-
-```go
-cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
-cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
-```
-
-### Parent-child Sections
-
-You can use `.` in a section name to indicate a parent-child relationship between two or more sections. If a key is not found in the child section, the library looks it up in the parent section, and keeps going up until there is no parent section.
-
-```ini
-NAME = ini
-VERSION = v1
-IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
-
-[package]
-CLONE_URL = https://%(IMPORT_PATH)s
-
-[package.sub]
-```
-
-```go
-cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
-```
-
-#### Retrieve parent keys available to a child section
-
-```go
-cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"]
-```
-
-### Same Key with Multiple Values
-
-Do you ever have a configuration file like this?
-
-```ini
-[remote "origin"]
-url = https://github.com/Antergone/test1.git
-url = https://github.com/Antergone/test2.git
-fetch = +refs/heads/*:refs/remotes/origin/*
-```
-
-By default, only the last value read is kept for the key `url`. If you want to keep all copies of this key's value, you can use `ShadowLoad`:
-
-```go
-cfg, err := ini.ShadowLoad(".gitconfig")
-// ...
-
-cfg.Section(`remote "origin"`).Key("url").String()
-// Result: https://github.com/Antergone/test1.git
-
-cfg.Section(`remote "origin"`).Key("url").ValueWithShadows()
-// Result: []string{
-//     "https://github.com/Antergone/test1.git",
-//     "https://github.com/Antergone/test2.git",
-// }
-```
-
-### Unparseable Sections
-
-Sometimes you have sections that do not contain key-value pairs but raw content. To handle such a case, you can use `LoadOptions.UnparseableSections`:
-
-```go
-cfg, err := ini.LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS]
-<1> This slide has the fuel listed in the wrong units `)
-
-body := cfg.Section("COMMENTS").Body()
-
-/* --- start ---
-<1> This slide has the fuel listed in the wrong units 
------- end --- */
-```
-
-### Auto-increment Key Names
-
-If a key name is `-` in the data source, it is treated as special syntax for an auto-increment key name starting from 1, and every section keeps its own counter.
-
-```ini
-[features]
--: Support read/write comments of keys and sections
--: Support auto-increment of key names
--: Support load multiple files to overwrite key values
-```
-
-```go
-cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
-```
-
-### Map To Struct
-
-Want a more object-oriented way to play with INI? Cool.
-
-```ini
-Name = Unknwon
-age = 21
-Male = true
-Born = 1993-01-01T20:17:05Z
-
-[Note]
-Content = Hi is a good man!
-Cities = HangZhou, Boston
-```
-
-```go
-type Note struct {
-    Content string
-    Cities  []string
-}
-
-type Person struct {
-    Name    string
-    Age     int `ini:"age"`
-    Male    bool
-    Born    time.Time
-    Note
-    Created time.Time `ini:"-"`
-}
-
-func main() {
-    cfg, err := ini.Load("path/to/ini")
-    // ...
-    p := new(Person)
-    err = cfg.MapTo(p)
-    // ...
-
-    // Things can be simpler.
-    err = ini.MapTo(p, "path/to/ini")
-    // ...
-
-    // Just map a section? Fine.
-    n := new(Note)
-    err = cfg.Section("Note").MapTo(n)
-    // ...
-}
-```
-
-Can I have a default value for a field? Absolutely.
-
-Assign it before you map to the struct. The field keeps its value if the key is not present or has the wrong type.
-
-```go
-// ...
-p := &Person{
-    Name: "Joe",
-}
-// ...
-```
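-
-For example, here is a self-contained sketch of that default-value behaviour (the `Settings` struct and the raw data are invented for illustration):
-
-```go
-package main
-
-import (
-    "fmt"
-
-    "gopkg.in/ini.v1"
-)
-
-type Settings struct {
-    Name string
-    Port int
-}
-
-func main() {
-    // "Name" is absent from the data source, so its pre-assigned default
-    // survives MapTo; "Port" is overwritten by the value from the data.
-    s := &Settings{Name: "Joe", Port: 3000}
-
-    if err := ini.MapTo(s, []byte("Port = 8080\n")); err != nil {
-        panic(err)
-    }
-
-    fmt.Println(s.Name, s.Port) // Joe 8080
-}
-```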
-
-It's really cool, but what's the point if you can't give me my file back from a struct?
-
-### Reflect From Struct
-
-Why not?
-
-```go
-type Embeded struct {
-    Dates  []time.Time `delim:"|" comment:"Time data"`
-    Places []string    `ini:"places,omitempty"`
-    None   []int       `ini:",omitempty"`
-}
-
-type Author struct {
-    Name      string `ini:"NAME"`
-    Male      bool
-    Age       int `comment:"Author's age"`
-    GPA       float64
-    NeverMind string `ini:"-"`
-    *Embeded  `comment:"Embeded section"`
-}
-
-func main() {
-    a := &Author{"Unknwon", true, 21, 2.8, "",
-        &Embeded{
-            []time.Time{time.Now(), time.Now()},
-            []string{"HangZhou", "Boston"},
-            []int{},
-        }}
-    cfg := ini.Empty()
-    err := ini.ReflectFrom(cfg, a)
-    // ...
-}
-```
-
-So, what do I get?
-
-```ini
-NAME = Unknwon
-Male = true
-; Author's age
-Age = 21
-GPA = 2.8
-
-; Embeded section
-[Embeded]
-; Time data
-Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
-places = HangZhou,Boston
-```
-
-#### Name Mapper
-
-To save your time and make your code cleaner, this library supports a [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field names and actual section and key names.
-
-There are two built-in name mappers:
-
-- `AllCapsUnderscore`: converts names to the `ALL_CAPS_UNDERSCORE` format before matching sections or keys.
-- `TitleUnderscore`: converts names to the `title_underscore` format before matching sections or keys.
-
-To use them:
-
-```go
-type Info struct {
-    PackageName string
-}
-
-func main() {
-    err := ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini"))
-    // ...
-
-    cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
-    // ...
-    info := new(Info)
-    cfg.NameMapper = ini.AllCapsUnderscore
-    err = cfg.MapTo(info)
-    // ...
-}
-```
-
-The same name mapper rules apply to the `ini.ReflectFromWithMapper` function.
-
-#### Value Mapper
-
-To expand values (e.g. from environment variables), you can use the `ValueMapper` to transform values:
-
-```go
-type Env struct {
-    Foo string `ini:"foo"`
-}
-
-func main() {
-    cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n"))
-    cfg.ValueMapper = os.ExpandEnv
-    // ...
-    env := &Env{}
-    err = cfg.Section("env").MapTo(env)
-}
-```
-
-This would set the value of `env.Foo` to the value of the environment variable `MY_VAR`.
-
-#### Other Notes On Map/Reflect
-
-Any embedded struct is treated as a section by default, and there are no automatic parent-child relations in the map/reflect feature:
-
-```go
-type Child struct {
-    Age string
-}
-
-type Parent struct {
-    Name string
-    Child
-}
-
-type Config struct {
-    City string
-    Parent
-}
-```
-
-Example configuration:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-
-[Child]
-Age = 21
-```
-
-What if, yes, I'm paranoid, I want the embedded struct to be in the same section? Well, all roads lead to Rome.
-
-```go
-type Child struct {
-    Age string
-}
-
-type Parent struct {
-    Name  string
-    Child `ini:"Parent"`
-}
-
-type Config struct {
-    City string
-    Parent
-}
-```
-
-Example configuration:
-
-```ini
-City = Boston
-
-[Parent]
-Name = Unknwon
-Age = 21
-```
-
-## Getting Help
-
-- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
-- [File An Issue](https://github.com/go-ini/ini/issues/new)
-
-## FAQs
-
-### What does the `BlockMode` field do?
-
-By default, the library lets you both read and write values, so it uses a lock to keep your data safe. But if you are sure you will only read data through the library, you can set `cfg.BlockMode = false` to speed up read operations by about **50-70%**.
-
-### Why another INI library?
- -Many people are using my another INI library [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is I would like to make more Go style code. Also when you set `cfg.BlockMode = false`, this one is about **10-30%** faster. - -To make those changes I have to confirm API broken, so it's safer to keep it in another place and start using `gopkg.in` to version my package at this time.(PS: shorter import path) - -## License - -This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text. diff --git a/vendor/github.com/go-ini/ini/README_ZH.md b/vendor/github.com/go-ini/ini/README_ZH.md deleted file mode 100644 index 67a53630204..00000000000 --- a/vendor/github.com/go-ini/ini/README_ZH.md +++ /dev/null @@ -1,777 +0,0 @@ -本包提供了 Go 语言中读写 INI 文件的功能。 - -## 功能特性 - -- 支持覆盖加载多个数据源(`[]byte`、文件和 `io.ReadCloser`) -- 支持递归读取键值 -- 支持读取父子分区 -- 支持读取自增键名 -- 支持读取多行的键值 -- 支持大量辅助方法 -- 支持在读取时直接转换为 Go 语言类型 -- 支持读取和 **写入** 分区和键的注释 -- 轻松操作分区、键值和注释 -- 在保存文件时分区和键值会保持原有的顺序 - -## 下载安装 - -使用一个特定版本: - - go get gopkg.in/ini.v1 - -使用最新版: - - go get github.com/go-ini/ini - -如需更新请添加 `-u` 选项。 - -### 测试安装 - -如果您想要在自己的机器上运行测试,请使用 `-t` 标记: - - go get -t gopkg.in/ini.v1 - -如需更新请添加 `-u` 选项。 - -## 开始使用 - -### 从数据源加载 - -一个 **数据源** 可以是 `[]byte` 类型的原始数据,`string` 类型的文件路径或 `io.ReadCloser`。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。 - -```go -cfg, err := ini.Load([]byte("raw data"), "filename", ioutil.NopCloser(bytes.NewReader([]byte("some other data")))) -``` - -或者从一个空白的文件开始: - -```go -cfg := ini.Empty() -``` - -当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。 - -```go -err := cfg.Append("other file", []byte("other raw data")) -``` - -当您想要加载一系列文件,但是不能够确定其中哪些文件是不存在的,可以通过调用函数 `LooseLoad` 来忽略它们(`Load` 会因为文件不存在而返回错误): - -```go -cfg, err := ini.LooseLoad("filename", "filename_404") -``` - -更牛逼的是,当那些之前不存在的文件在重新调用 `Reload` 方法的时候突然出现了,那么它们会被正常加载。 - -#### 忽略键名的大小写 - -有时候分区和键的名称大小写混合非常烦人,这个时候就可以通过 `InsensitiveLoad` 将所有分区和键名在读取里强制转换为小写: - -```go -cfg, err := ini.InsensitiveLoad("filename") -//... - -// sec1 和 sec2 指向同一个分区对象 -sec1, err := cfg.GetSection("Section") -sec2, err := cfg.GetSection("SecTIOn") - -// key1 和 key2 指向同一个键对象 -key1, err := sec1.GetKey("Key") -key2, err := sec2.GetKey("KeY") -``` - -#### 类似 MySQL 配置中的布尔值键 - -MySQL 的配置文件中会出现没有具体值的布尔类型的键: - -```ini -[mysqld] -... -skip-host-cache -skip-name-resolve -``` - -默认情况下这被认为是缺失值而无法完成解析,但可以通过高级的加载选项对它们进行处理: - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{AllowBooleanKeys: true}, "my.cnf")) -``` - -这些键的值永远为 `true`,且在保存到文件时也只会输出键名。 - -如果您想要通过程序来生成此类键,则可以使用 `NewBooleanKey`: - -```go -key, err := sec.NewBooleanKey("skip-host-cache") -``` - -#### 关于注释 - -下述几种情况的内容将被视为注释: - -1. 所有以 `#` 或 `;` 开头的行 -2. 所有在 `#` 或 `;` 之后的内容 -3. 
分区标签后的文字 (即 `[分区名]` 之后的内容) - -如果你希望使用包含 `#` 或 `;` 的值,请使用 ``` ` ``` 或 ``` """ ``` 进行包覆。 - -除此之外,您还可以通过 `LoadOptions` 完全忽略行内注释: - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{IgnoreInlineComment: true}, "app.ini")) -``` - -### 操作分区(Section) - -获取指定分区: - -```go -section, err := cfg.GetSection("section name") -``` - -如果您想要获取默认分区,则可以用空字符串代替分区名: - -```go -section, err := cfg.GetSection("") -``` - -当您非常确定某个分区是存在的,可以使用以下简便方法: - -```go -section := cfg.Section("section name") -``` - -如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。 - -创建一个分区: - -```go -err := cfg.NewSection("new section") -``` - -获取所有分区对象或名称: - -```go -sections := cfg.Sections() -names := cfg.SectionStrings() -``` - -### 操作键(Key) - -获取某个分区下的键: - -```go -key, err := cfg.Section("").GetKey("key name") -``` - -和分区一样,您也可以直接获取键而忽略错误处理: - -```go -key := cfg.Section("").Key("key name") -``` - -判断某个键是否存在: - -```go -yes := cfg.Section("").HasKey("key name") -``` - -创建一个新的键: - -```go -err := cfg.Section("").NewKey("name", "value") -``` - -获取分区下的所有键或键名: - -```go -keys := cfg.Section("").Keys() -names := cfg.Section("").KeyStrings() -``` - -获取分区下的所有键值对的克隆: - -```go -hash := cfg.Section("").KeysHash() -``` - -### 操作键值(Value) - -获取一个类型为字符串(string)的值: - -```go -val := cfg.Section("").Key("key name").String() -``` - -获取值的同时通过自定义函数进行处理验证: - -```go -val := cfg.Section("").Key("key name").Validate(func(in string) string { - if len(in) == 0 { - return "default" - } - return in -}) -``` - -如果您不需要任何对值的自动转变功能(例如递归读取),可以直接获取原值(这种方式性能最佳): - -```go -val := cfg.Section("").Key("key name").Value() -``` - -判断某个原值是否存在: - -```go -yes := cfg.Section("").HasValue("test value") -``` - -获取其它类型的值: - -```go -// 布尔值的规则: -// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On -// false 当值为:0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off -v, err = cfg.Section("").Key("BOOL").Bool() -v, err = cfg.Section("").Key("FLOAT64").Float64() -v, err = cfg.Section("").Key("INT").Int() -v, err = cfg.Section("").Key("INT64").Int64() -v, err = cfg.Section("").Key("UINT").Uint() -v, err = cfg.Section("").Key("UINT64").Uint64() -v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339) -v, err = cfg.Section("").Key("TIME").Time() // RFC3339 - -v = cfg.Section("").Key("BOOL").MustBool() -v = cfg.Section("").Key("FLOAT64").MustFloat64() -v = cfg.Section("").Key("INT").MustInt() -v = cfg.Section("").Key("INT64").MustInt64() -v = cfg.Section("").Key("UINT").MustUint() -v = cfg.Section("").Key("UINT64").MustUint64() -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339) -v = cfg.Section("").Key("TIME").MustTime() // RFC3339 - -// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值, -// 当键不存在或者转换失败时,则会直接返回该默认值。 -// 但是,MustString 方法必须传递一个默认值。 - -v = cfg.Seciont("").Key("String").MustString("default") -v = cfg.Section("").Key("BOOL").MustBool(true) -v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25) -v = cfg.Section("").Key("INT").MustInt(10) -v = cfg.Section("").Key("INT64").MustInt64(99) -v = cfg.Section("").Key("UINT").MustUint(3) -v = cfg.Section("").Key("UINT64").MustUint64(6) -v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now()) -v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339 -``` - -如果我的值有好多行怎么办? - -```ini -[advance] -ADDRESS = """404 road, -NotFound, State, 5000 -Earth""" -``` - -嗯哼?小 case! - -```go -cfg.Section("advance").Key("ADDRESS").String() - -/* --- start --- -404 road, -NotFound, State, 5000 -Earth ------- end --- */ -``` - -赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办? 
- -```ini -[advance] -two_lines = how about \ - continuation lines? -lots_of_lines = 1 \ - 2 \ - 3 \ - 4 -``` - -简直是小菜一碟! - -```go -cfg.Section("advance").Key("two_lines").String() // how about continuation lines? -cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4 -``` - -可是我有时候觉得两行连在一起特别没劲,怎么才能不自动连接两行呢? - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{ - IgnoreContinuation: true, -}, "filename") -``` - -哇靠给力啊! - -需要注意的是,值两侧的单引号会被自动剔除: - -```ini -foo = "some value" // foo: some value -bar = 'some value' // bar: some value -``` - -有时您会获得像从 [Crowdin](https://crowdin.com/) 网站下载的文件那样具有特殊格式的值(值使用双引号括起来,内部的双引号被转义): - -```ini -create_repo="创建了仓库 %s" -``` - -那么,怎么自动地将这类值进行处理呢? - -```go -cfg, err := ini.LoadSources(ini.LoadOptions{UnescapeValueDoubleQuotes: true}, "en-US.ini")) -cfg.Section("").Key("create_repo").String() -// You got: 创建了仓库 %s -``` - -这就是全部了?哈哈,当然不是。 - -#### 操作键值的辅助方法 - -获取键值时设定候选值: - -```go -v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"}) -v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75}) -v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30}) -v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30}) -v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9}) -v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9}) -v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3}) -v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339 -``` - -如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。 - -验证获取的值是否在指定范围内: - -```go -vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2) -vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20) -vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20) -vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9) -vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9) -vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime) -vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339 -``` - -##### 自动分割键值到切片(slice) - -当存在无效输入时,使用零值代替: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [0.0 2.2 0.0 0.0] -vals = cfg.Section("").Key("STRINGS").Strings(",") -vals = cfg.Section("").Key("FLOAT64S").Float64s(",") -vals = cfg.Section("").Key("INTS").Ints(",") -vals = cfg.Section("").Key("INT64S").Int64s(",") -vals = cfg.Section("").Key("UINTS").Uints(",") -vals = cfg.Section("").Key("UINT64S").Uint64s(",") -vals = cfg.Section("").Key("TIMES").Times(",") -``` - -从结果切片中剔除无效输入: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> [2.2] -vals = cfg.Section("").Key("FLOAT64S").ValidFloat64s(",") -vals = cfg.Section("").Key("INTS").ValidInts(",") -vals = cfg.Section("").Key("INT64S").ValidInt64s(",") -vals = cfg.Section("").Key("UINTS").ValidUints(",") -vals = cfg.Section("").Key("UINT64S").ValidUint64s(",") -vals = cfg.Section("").Key("TIMES").ValidTimes(",") -``` - -当存在无效输入时,直接返回错误: - -```go -// Input: 1.1, 2.2, 3.3, 4.4 -> [1.1 2.2 3.3 4.4] -// Input: how, 2.2, are, you -> error -vals = cfg.Section("").Key("FLOAT64S").StrictFloat64s(",") -vals = cfg.Section("").Key("INTS").StrictInts(",") -vals = cfg.Section("").Key("INT64S").StrictInt64s(",") -vals = cfg.Section("").Key("UINTS").StrictUints(",") -vals = cfg.Section("").Key("UINT64S").StrictUint64s(",") -vals = cfg.Section("").Key("TIMES").StrictTimes(",") -``` - -### 保存配置 - 
-终于到了这个时刻,是时候保存一下配置了。 - -比较原始的做法是输出配置到某个文件: - -```go -// ... -err = cfg.SaveTo("my.ini") -err = cfg.SaveToIndent("my.ini", "\t") -``` - -另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中: - -```go -// ... -cfg.WriteTo(writer) -cfg.WriteToIndent(writer, "\t") -``` - -默认情况下,空格将被用于对齐键值之间的等号以美化输出结果,以下代码可以禁用该功能: - -```go -ini.PrettyFormat = false -``` - -## 高级用法 - -### 递归读取键值 - -在获取所有键值的过程中,特殊语法 `%()s` 会被应用,其中 `` 可以是相同分区或者默认分区下的键名。字符串 `%()s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。 - -```ini -NAME = ini - -[author] -NAME = Unknwon -GITHUB = https://github.com/%(NAME)s - -[package] -FULL_NAME = github.com/go-ini/%(NAME)s -``` - -```go -cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon -cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini -``` - -### 读取父子分区 - -您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。 - -```ini -NAME = ini -VERSION = v1 -IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s - -[package] -CLONE_URL = https://%(IMPORT_PATH)s - -[package.sub] -``` - -```go -cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1 -``` - -#### 获取上级父分区下的所有键名 - -```go -cfg.Section("package.sub").ParentKeys() // ["CLONE_URL"] -``` - -### 同个键名包含多个值 - -你是否也曾被下面的配置文件所困扰? - -```ini -[remote "origin"] -url = https://github.com/Antergone/test1.git -url = https://github.com/Antergone/test2.git -fetch = +refs/heads/*:refs/remotes/origin/* -``` - -没错!默认情况下,只有最后一次出现的值会被保存到 `url` 中,可我就是想要保留所有的值怎么办啊?不要紧,用 `ShadowLoad` 轻松解决你的烦恼: - -```go -cfg, err := ini.ShadowLoad(".gitconfig") -// ... - -f.Section(`remote "origin"`).Key("url").String() -// Result: https://github.com/Antergone/test1.git - -f.Section(`remote "origin"`).Key("url").ValueWithShadows() -// Result: []string{ -// "https://github.com/Antergone/test1.git", -// "https://github.com/Antergone/test2.git", -// } -``` - -### 无法解析的分区 - -如果遇到一些比较特殊的分区,它们不包含常见的键值对,而是没有固定格式的纯文本,则可以使用 `LoadOptions.UnparsableSections` 进行处理: - -```go -cfg, err := LoadSources(ini.LoadOptions{UnparseableSections: []string{"COMMENTS"}}, `[COMMENTS] -<1> This slide has the fuel listed in the wrong units `)) - -body := cfg.Section("COMMENTS").Body() - -/* --- start --- -<1> This slide has the fuel listed in the wrong units ------- end --- */ -``` - -### 读取自增键名 - -如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。 - -```ini -[features] --: Support read/write comments of keys and sections --: Support auto-increment of key names --: Support load multiple files to overwrite key values -``` - -```go -cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"} -``` - -### 映射到结构 - -想要使用更加面向对象的方式玩转 INI 吗?好主意。 - -```ini -Name = Unknwon -age = 21 -Male = true -Born = 1993-01-01T20:17:05Z - -[Note] -Content = Hi is a good man! -Cities = HangZhou, Boston -``` - -```go -type Note struct { - Content string - Cities []string -} - -type Person struct { - Name string - Age int `ini:"age"` - Male bool - Born time.Time - Note - Created time.Time `ini:"-"` -} - -func main() { - cfg, err := ini.Load("path/to/ini") - // ... - p := new(Person) - err = cfg.MapTo(p) - // ... - - // 一切竟可以如此的简单。 - err = ini.MapTo(p, "path/to/ini") - // ... - - // 嗯哼?只需要映射一个分区吗? - n := new(Note) - err = cfg.Section("Note").MapTo(n) - // ... -} -``` - -结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。 - -```go -// ... -p := &Person{ - Name: "Joe", -} -// ... -``` - -这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用? - -### 从结构反射 - -可是,我有说不能吗? 
- -```go -type Embeded struct { - Dates []time.Time `delim:"|" comment:"Time data"` - Places []string `ini:"places,omitempty"` - None []int `ini:",omitempty"` -} - -type Author struct { - Name string `ini:"NAME"` - Male bool - Age int `comment:"Author's age"` - GPA float64 - NeverMind string `ini:"-"` - *Embeded `comment:"Embeded section"` -} - -func main() { - a := &Author{"Unknwon", true, 21, 2.8, "", - &Embeded{ - []time.Time{time.Now(), time.Now()}, - []string{"HangZhou", "Boston"}, - []int{}, - }} - cfg := ini.Empty() - err = ini.ReflectFrom(cfg, a) - // ... -} -``` - -瞧瞧,奇迹发生了。 - -```ini -NAME = Unknwon -Male = true -; Author's age -Age = 21 -GPA = 2.8 - -; Embeded section -[Embeded] -; Time data -Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00 -places = HangZhou,Boston -``` - -#### 名称映射器(Name Mapper) - -为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。 - -目前有 2 款内置的映射器: - -- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。 -- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。 - -使用方法: - -```go -type Info struct{ - PackageName string -} - -func main() { - err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("package_name=ini")) - // ... - - cfg, err := ini.Load([]byte("PACKAGE_NAME=ini")) - // ... - info := new(Info) - cfg.NameMapper = ini.AllCapsUnderscore - err = cfg.MapTo(info) - // ... -} -``` - -使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。 - -#### 值映射器(Value Mapper) - -值映射器允许使用一个自定义函数自动展开值的具体内容,例如:运行时获取环境变量: - -```go -type Env struct { - Foo string `ini:"foo"` -} - -func main() { - cfg, err := ini.Load([]byte("[env]\nfoo = ${MY_VAR}\n") - cfg.ValueMapper = os.ExpandEnv - // ... - env := &Env{} - err = cfg.Section("env").MapTo(env) -} -``` - -本例中,`env.Foo` 将会是运行时所获取到环境变量 `MY_VAR` 的值。 - -#### 映射/反射的其它说明 - -任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联: - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child -} - -type Config struct { - City string - Parent -} -``` - -示例配置文件: - -```ini -City = Boston - -[Parent] -Name = Unknwon - -[Child] -Age = 21 -``` - -很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚! - -```go -type Child struct { - Age string -} - -type Parent struct { - Name string - Child `ini:"Parent"` -} - -type Config struct { - City string - Parent -} -``` - -示例配置文件: - -```ini -City = Boston - -[Parent] -Name = Unknwon -Age = 21 -``` - -## 获取帮助 - -- [API 文档](https://gowalker.org/gopkg.in/ini.v1) -- [创建工单](https://github.com/go-ini/ini/issues/new) - -## 常见问题 - -### 字段 `BlockMode` 是什么? - -默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。 - -### 为什么要写另一个 INI 解析库? - -许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。 - -为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了) diff --git a/vendor/github.com/go-ini/ini/error.go b/vendor/github.com/go-ini/ini/error.go deleted file mode 100644 index 80afe743158..00000000000 --- a/vendor/github.com/go-ini/ini/error.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2016 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. 
You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "fmt" -) - -type ErrDelimiterNotFound struct { - Line string -} - -func IsErrDelimiterNotFound(err error) bool { - _, ok := err.(ErrDelimiterNotFound) - return ok -} - -func (err ErrDelimiterNotFound) Error() string { - return fmt.Sprintf("key-value delimiter not found: %s", err.Line) -} diff --git a/vendor/github.com/go-ini/ini/file.go b/vendor/github.com/go-ini/ini/file.go deleted file mode 100644 index ae6264acfa2..00000000000 --- a/vendor/github.com/go-ini/ini/file.go +++ /dev/null @@ -1,403 +0,0 @@ -// Copyright 2017 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "sync" -) - -// File represents a combination of a or more INI file(s) in memory. -type File struct { - options LoadOptions - dataSources []dataSource - - // Should make things safe, but sometimes doesn't matter. - BlockMode bool - lock sync.RWMutex - - // To keep data in order. - sectionList []string - // Actual data is stored here. - sections map[string]*Section - - NameMapper - ValueMapper -} - -// newFile initializes File object with given data sources. -func newFile(dataSources []dataSource, opts LoadOptions) *File { - return &File{ - BlockMode: true, - dataSources: dataSources, - sections: make(map[string]*Section), - sectionList: make([]string, 0, 10), - options: opts, - } -} - -// Empty returns an empty file object. -func Empty() *File { - // Ignore error here, we sure our data is good. - f, _ := Load([]byte("")) - return f -} - -// NewSection creates a new section. -func (f *File) NewSection(name string) (*Section, error) { - if len(name) == 0 { - return nil, errors.New("error creating new section: empty section name") - } else if f.options.Insensitive && name != DEFAULT_SECTION { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if inSlice(name, f.sectionList) { - return f.sections[name], nil - } - - f.sectionList = append(f.sectionList, name) - f.sections[name] = newSection(f, name) - return f.sections[name], nil -} - -// NewRawSection creates a new section with an unparseable body. -func (f *File) NewRawSection(name, body string) (*Section, error) { - section, err := f.NewSection(name) - if err != nil { - return nil, err - } - - section.isRawSection = true - section.rawBody = body - return section, nil -} - -// NewSections creates a list of sections. 
-func (f *File) NewSections(names ...string) (err error) { - for _, name := range names { - if _, err = f.NewSection(name); err != nil { - return err - } - } - return nil -} - -// GetSection returns section by given name. -func (f *File) GetSection(name string) (*Section, error) { - if len(name) == 0 { - name = DEFAULT_SECTION - } - if f.options.Insensitive { - name = strings.ToLower(name) - } - - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sec := f.sections[name] - if sec == nil { - return nil, fmt.Errorf("section '%s' does not exist", name) - } - return sec, nil -} - -// Section assumes named section exists and returns a zero-value when not. -func (f *File) Section(name string) *Section { - sec, err := f.GetSection(name) - if err != nil { - // Note: It's OK here because the only possible error is empty section name, - // but if it's empty, this piece of code won't be executed. - sec, _ = f.NewSection(name) - return sec - } - return sec -} - -// Section returns list of Section. -func (f *File) Sections() []*Section { - if f.BlockMode { - f.lock.RLock() - defer f.lock.RUnlock() - } - - sections := make([]*Section, len(f.sectionList)) - for i, name := range f.sectionList { - sections[i] = f.sections[name] - } - return sections -} - -// ChildSections returns a list of child sections of given section name. -func (f *File) ChildSections(name string) []*Section { - return f.Section(name).ChildSections() -} - -// SectionStrings returns list of section names. -func (f *File) SectionStrings() []string { - list := make([]string, len(f.sectionList)) - copy(list, f.sectionList) - return list -} - -// DeleteSection deletes a section. -func (f *File) DeleteSection(name string) { - if f.BlockMode { - f.lock.Lock() - defer f.lock.Unlock() - } - - if len(name) == 0 { - name = DEFAULT_SECTION - } - - for i, s := range f.sectionList { - if s == name { - f.sectionList = append(f.sectionList[:i], f.sectionList[i+1:]...) - delete(f.sections, name) - return - } - } -} - -func (f *File) reload(s dataSource) error { - r, err := s.ReadCloser() - if err != nil { - return err - } - defer r.Close() - - return f.parse(r) -} - -// Reload reloads and parses all data sources. -func (f *File) Reload() (err error) { - for _, s := range f.dataSources { - if err = f.reload(s); err != nil { - // In loose mode, we create an empty default section for nonexistent files. - if os.IsNotExist(err) && f.options.Loose { - f.parse(bytes.NewBuffer(nil)) - continue - } - return err - } - } - return nil -} - -// Append appends one or more data sources and reloads automatically. -func (f *File) Append(source interface{}, others ...interface{}) error { - ds, err := parseDataSource(source) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - for _, s := range others { - ds, err = parseDataSource(s) - if err != nil { - return err - } - f.dataSources = append(f.dataSources, ds) - } - return f.Reload() -} - -func (f *File) writeToBuffer(indent string) (*bytes.Buffer, error) { - equalSign := "=" - if PrettyFormat { - equalSign = " = " - } - - // Use buffer to make sure target is safe until finish encoding. 
- buf := bytes.NewBuffer(nil) - for i, sname := range f.sectionList { - sec := f.Section(sname) - if len(sec.Comment) > 0 { - if sec.Comment[0] != '#' && sec.Comment[0] != ';' { - sec.Comment = "; " + sec.Comment - } else { - sec.Comment = sec.Comment[:1] + " " + strings.TrimSpace(sec.Comment[1:]) - } - if _, err := buf.WriteString(sec.Comment + LineBreak); err != nil { - return nil, err - } - } - - if i > 0 || DefaultHeader { - if _, err := buf.WriteString("[" + sname + "]" + LineBreak); err != nil { - return nil, err - } - } else { - // Write nothing if default section is empty - if len(sec.keyList) == 0 { - continue - } - } - - if sec.isRawSection { - if _, err := buf.WriteString(sec.rawBody); err != nil { - return nil, err - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - continue - } - - // Count and generate alignment length and buffer spaces using the - // longest key. Keys may be modifed if they contain certain characters so - // we need to take that into account in our calculation. - alignLength := 0 - if PrettyFormat { - for _, kname := range sec.keyList { - keyLength := len(kname) - // First case will surround key by ` and second by """ - if strings.ContainsAny(kname, "\"=:") { - keyLength += 2 - } else if strings.Contains(kname, "`") { - keyLength += 6 - } - - if keyLength > alignLength { - alignLength = keyLength - } - } - } - alignSpaces := bytes.Repeat([]byte(" "), alignLength) - - KEY_LIST: - for _, kname := range sec.keyList { - key := sec.Key(kname) - if len(key.Comment) > 0 { - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - if key.Comment[0] != '#' && key.Comment[0] != ';' { - key.Comment = "; " + key.Comment - } else { - key.Comment = key.Comment[:1] + " " + strings.TrimSpace(key.Comment[1:]) - } - if _, err := buf.WriteString(key.Comment + LineBreak); err != nil { - return nil, err - } - } - - if len(indent) > 0 && sname != DEFAULT_SECTION { - buf.WriteString(indent) - } - - switch { - case key.isAutoIncrement: - kname = "-" - case strings.ContainsAny(kname, "\"=:"): - kname = "`" + kname + "`" - case strings.Contains(kname, "`"): - kname = `"""` + kname + `"""` - } - - for _, val := range key.ValueWithShadows() { - if _, err := buf.WriteString(kname); err != nil { - return nil, err - } - - if key.isBooleanType { - if kname != sec.keyList[len(sec.keyList)-1] { - buf.WriteString(LineBreak) - } - continue KEY_LIST - } - - // Write out alignment spaces before "=" sign - if PrettyFormat { - buf.Write(alignSpaces[:alignLength-len(kname)]) - } - - // In case key value contains "\n", "`", "\"", "#" or ";" - if strings.ContainsAny(val, "\n`") { - val = `"""` + val + `"""` - } else if !f.options.IgnoreInlineComment && strings.ContainsAny(val, "#;") { - val = "`" + val + "`" - } - if _, err := buf.WriteString(equalSign + val + LineBreak); err != nil { - return nil, err - } - } - - for _, val := range key.nestedValues { - if _, err := buf.WriteString(indent + " " + val + LineBreak); err != nil { - return nil, err - } - } - } - - if PrettySection { - // Put a line between sections - if _, err := buf.WriteString(LineBreak); err != nil { - return nil, err - } - } - } - - return buf, nil -} - -// WriteToIndent writes content into io.Writer with given indention. -// If PrettyFormat has been set to be true, -// it will align "=" sign with spaces under each section. 
-func (f *File) WriteToIndent(w io.Writer, indent string) (int64, error) { - buf, err := f.writeToBuffer(indent) - if err != nil { - return 0, err - } - return buf.WriteTo(w) -} - -// WriteTo writes file content into io.Writer. -func (f *File) WriteTo(w io.Writer) (int64, error) { - return f.WriteToIndent(w, "") -} - -// SaveToIndent writes content to file system with given value indention. -func (f *File) SaveToIndent(filename, indent string) error { - // Note: Because we are truncating with os.Create, - // so it's safer to save to a temporary file location and rename afte done. - buf, err := f.writeToBuffer(indent) - if err != nil { - return err - } - - return ioutil.WriteFile(filename, buf.Bytes(), 0666) -} - -// SaveTo writes content to file system. -func (f *File) SaveTo(filename string) error { - return f.SaveToIndent(filename, "") -} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go deleted file mode 100644 index 9f6ea3b4157..00000000000 --- a/vendor/github.com/go-ini/ini/ini.go +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -// Package ini provides INI file read and write functionality in Go. -package ini - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "regexp" - "runtime" -) - -const ( - // Name for default section. You can use this constant or the string literal. - // In most of cases, an empty string is all you need to access the section. - DEFAULT_SECTION = "DEFAULT" - - // Maximum allowed depth when recursively substituing variable names. - _DEPTH_VALUES = 99 - _VERSION = "1.33.0" -) - -// Version returns current package version literal. -func Version() string { - return _VERSION -} - -var ( - // Delimiter to determine or compose a new line. - // This variable will be changed to "\r\n" automatically on Windows - // at package init time. - LineBreak = "\n" - - // Variable regexp pattern: %(variable)s - varPattern = regexp.MustCompile(`%\(([^\)]+)\)s`) - - // Indicate whether to align "=" sign with spaces to produce pretty output - // or reduce all possible spaces for compact format. - PrettyFormat = true - - // Explicitly write DEFAULT section header - DefaultHeader = false - - // Indicate whether to put a line between sections - PrettySection = true -) - -func init() { - if runtime.GOOS == "windows" { - LineBreak = "\r\n" - } -} - -func inSlice(str string, s []string) bool { - for _, v := range s { - if str == v { - return true - } - } - return false -} - -// dataSource is an interface that returns object which can be read and closed. -type dataSource interface { - ReadCloser() (io.ReadCloser, error) -} - -// sourceFile represents an object that contains content on the local file system. -type sourceFile struct { - name string -} - -func (s sourceFile) ReadCloser() (_ io.ReadCloser, err error) { - return os.Open(s.name) -} - -// sourceData represents an object that contains content in memory. 
-type sourceData struct { - data []byte -} - -func (s *sourceData) ReadCloser() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewReader(s.data)), nil -} - -// sourceReadCloser represents an input stream with Close method. -type sourceReadCloser struct { - reader io.ReadCloser -} - -func (s *sourceReadCloser) ReadCloser() (io.ReadCloser, error) { - return s.reader, nil -} - -func parseDataSource(source interface{}) (dataSource, error) { - switch s := source.(type) { - case string: - return sourceFile{s}, nil - case []byte: - return &sourceData{s}, nil - case io.ReadCloser: - return &sourceReadCloser{s}, nil - default: - return nil, fmt.Errorf("error parsing data source: unknown type '%s'", s) - } -} - -type LoadOptions struct { - // Loose indicates whether the parser should ignore nonexistent files or return error. - Loose bool - // Insensitive indicates whether the parser forces all section and key names to lowercase. - Insensitive bool - // IgnoreContinuation indicates whether to ignore continuation lines while parsing. - IgnoreContinuation bool - // IgnoreInlineComment indicates whether to ignore comments at the end of value and treat it as part of value. - IgnoreInlineComment bool - // AllowBooleanKeys indicates whether to allow boolean type keys or treat as value is missing. - // This type of keys are mostly used in my.cnf. - AllowBooleanKeys bool - // AllowShadows indicates whether to keep track of keys with same name under same section. - AllowShadows bool - // AllowNestedValues indicates whether to allow AWS-like nested values. - // Docs: http://docs.aws.amazon.com/cli/latest/topic/config-vars.html#nested-values - AllowNestedValues bool - // UnescapeValueDoubleQuotes indicates whether to unescape double quotes inside value to regular format - // when value is surrounded by double quotes, e.g. key="a \"value\"" => key=a "value" - UnescapeValueDoubleQuotes bool - // UnescapeValueCommentSymbols indicates to unescape comment symbols (\# and \;) inside value to regular format - // when value is NOT surrounded by any quotes. - // Note: UNSTABLE, behavior might change to only unescape inside double quotes but may noy necessary at all. - UnescapeValueCommentSymbols bool - // Some INI formats allow group blocks that store a block of raw content that doesn't otherwise - // conform to key/value pairs. Specify the names of those blocks here. - UnparseableSections []string -} - -func LoadSources(opts LoadOptions, source interface{}, others ...interface{}) (_ *File, err error) { - sources := make([]dataSource, len(others)+1) - sources[0], err = parseDataSource(source) - if err != nil { - return nil, err - } - for i := range others { - sources[i+1], err = parseDataSource(others[i]) - if err != nil { - return nil, err - } - } - f := newFile(sources, opts) - if err = f.Reload(); err != nil { - return nil, err - } - return f, nil -} - -// Load loads and parses from INI data sources. -// Arguments can be mixed of file name with string type, or raw data in []byte. -// It will return error if list contains nonexistent files. -func Load(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{}, source, others...) -} - -// LooseLoad has exactly same functionality as Load function -// except it ignores nonexistent files instead of returning error. -func LooseLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Loose: true}, source, others...) 
-} - -// InsensitiveLoad has exactly same functionality as Load function -// except it forces all section and key names to be lowercased. -func InsensitiveLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{Insensitive: true}, source, others...) -} - -// InsensitiveLoad has exactly same functionality as Load function -// except it allows have shadow keys. -func ShadowLoad(source interface{}, others ...interface{}) (*File, error) { - return LoadSources(LoadOptions{AllowShadows: true}, source, others...) -} diff --git a/vendor/github.com/go-ini/ini/key.go b/vendor/github.com/go-ini/ini/key.go deleted file mode 100644 index 7c8566a1b4c..00000000000 --- a/vendor/github.com/go-ini/ini/key.go +++ /dev/null @@ -1,751 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "strconv" - "strings" - "time" -) - -// Key represents a key under a section. -type Key struct { - s *Section - Comment string - name string - value string - isAutoIncrement bool - isBooleanType bool - - isShadow bool - shadows []*Key - - nestedValues []string -} - -// newKey simply return a key object with given values. -func newKey(s *Section, name, val string) *Key { - return &Key{ - s: s, - name: name, - value: val, - } -} - -func (k *Key) addShadow(val string) error { - if k.isShadow { - return errors.New("cannot add shadow to another shadow key") - } else if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add shadow to auto-increment or boolean key") - } - - shadow := newKey(k.s, k.name, val) - shadow.isShadow = true - k.shadows = append(k.shadows, shadow) - return nil -} - -// AddShadow adds a new shadow key to itself. -func (k *Key) AddShadow(val string) error { - if !k.s.f.options.AllowShadows { - return errors.New("shadow key is not allowed") - } - return k.addShadow(val) -} - -func (k *Key) addNestedValue(val string) error { - if k.isAutoIncrement || k.isBooleanType { - return errors.New("cannot add nested value to auto-increment or boolean key") - } - - k.nestedValues = append(k.nestedValues, val) - return nil -} - -func (k *Key) AddNestedValue(val string) error { - if !k.s.f.options.AllowNestedValues { - return errors.New("nested value is not allowed") - } - return k.addNestedValue(val) -} - -// ValueMapper represents a mapping function for values, e.g. os.ExpandEnv -type ValueMapper func(string) string - -// Name returns name of key. -func (k *Key) Name() string { - return k.name -} - -// Value returns raw value of key for performance purpose. -func (k *Key) Value() string { - return k.value -} - -// ValueWithShadows returns raw values of key and its shadows if any. 
-func (k *Key) ValueWithShadows() []string { - if len(k.shadows) == 0 { - return []string{k.value} - } - vals := make([]string, len(k.shadows)+1) - vals[0] = k.value - for i := range k.shadows { - vals[i+1] = k.shadows[i].value - } - return vals -} - -// NestedValues returns nested values stored in the key. -// It is possible returned value is nil if no nested values stored in the key. -func (k *Key) NestedValues() []string { - return k.nestedValues -} - -// transformValue takes a raw value and transforms to its final string. -func (k *Key) transformValue(val string) string { - if k.s.f.ValueMapper != nil { - val = k.s.f.ValueMapper(val) - } - - // Fail-fast if no indicate char found for recursive value - if !strings.Contains(val, "%") { - return val - } - for i := 0; i < _DEPTH_VALUES; i++ { - vr := varPattern.FindString(val) - if len(vr) == 0 { - break - } - - // Take off leading '%(' and trailing ')s'. - noption := strings.TrimLeft(vr, "%(") - noption = strings.TrimRight(noption, ")s") - - // Search in the same section. - nk, err := k.s.GetKey(noption) - if err != nil || k == nk { - // Search again in default section. - nk, _ = k.s.f.Section("").GetKey(noption) - } - - // Substitute by new value and take off leading '%(' and trailing ')s'. - val = strings.Replace(val, vr, nk.value, -1) - } - return val -} - -// String returns string representation of value. -func (k *Key) String() string { - return k.transformValue(k.value) -} - -// Validate accepts a validate function which can -// return modifed result as key value. -func (k *Key) Validate(fn func(string) string) string { - return fn(k.String()) -} - -// parseBool returns the boolean value represented by the string. -// -// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On, -// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off. -// Any other value returns an error. -func parseBool(str string) (value bool, err error) { - switch str { - case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On": - return true, nil - case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off": - return false, nil - } - return false, fmt.Errorf("parsing \"%s\": invalid syntax", str) -} - -// Bool returns bool type value. -func (k *Key) Bool() (bool, error) { - return parseBool(k.String()) -} - -// Float64 returns float64 type value. -func (k *Key) Float64() (float64, error) { - return strconv.ParseFloat(k.String(), 64) -} - -// Int returns int type value. -func (k *Key) Int() (int, error) { - return strconv.Atoi(k.String()) -} - -// Int64 returns int64 type value. -func (k *Key) Int64() (int64, error) { - return strconv.ParseInt(k.String(), 10, 64) -} - -// Uint returns uint type valued. -func (k *Key) Uint() (uint, error) { - u, e := strconv.ParseUint(k.String(), 10, 64) - return uint(u), e -} - -// Uint64 returns uint64 type value. -func (k *Key) Uint64() (uint64, error) { - return strconv.ParseUint(k.String(), 10, 64) -} - -// Duration returns time.Duration type value. -func (k *Key) Duration() (time.Duration, error) { - return time.ParseDuration(k.String()) -} - -// TimeFormat parses with given format and returns time.Time type value. -func (k *Key) TimeFormat(format string) (time.Time, error) { - return time.Parse(format, k.String()) -} - -// Time parses with RFC3339 format and returns time.Time type value. -func (k *Key) Time() (time.Time, error) { - return k.TimeFormat(time.RFC3339) -} - -// MustString returns default value if key value is empty. 
-func (k *Key) MustString(defaultVal string) string { - val := k.String() - if len(val) == 0 { - k.value = defaultVal - return defaultVal - } - return val -} - -// MustBool always returns value without error, -// it returns false if error occurs. -func (k *Key) MustBool(defaultVal ...bool) bool { - val, err := k.Bool() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatBool(defaultVal[0]) - return defaultVal[0] - } - return val -} - -// MustFloat64 always returns value without error, -// it returns 0.0 if error occurs. -func (k *Key) MustFloat64(defaultVal ...float64) float64 { - val, err := k.Float64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatFloat(defaultVal[0], 'f', -1, 64) - return defaultVal[0] - } - return val -} - -// MustInt always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt(defaultVal ...int) int { - val, err := k.Int() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(int64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustInt64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustInt64(defaultVal ...int64) int64 { - val, err := k.Int64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatInt(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustUint always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint(defaultVal ...uint) uint { - val, err := k.Uint() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(uint64(defaultVal[0]), 10) - return defaultVal[0] - } - return val -} - -// MustUint64 always returns value without error, -// it returns 0 if error occurs. -func (k *Key) MustUint64(defaultVal ...uint64) uint64 { - val, err := k.Uint64() - if len(defaultVal) > 0 && err != nil { - k.value = strconv.FormatUint(defaultVal[0], 10) - return defaultVal[0] - } - return val -} - -// MustDuration always returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustDuration(defaultVal ...time.Duration) time.Duration { - val, err := k.Duration() - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].String() - return defaultVal[0] - } - return val -} - -// MustTimeFormat always parses with given format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTimeFormat(format string, defaultVal ...time.Time) time.Time { - val, err := k.TimeFormat(format) - if len(defaultVal) > 0 && err != nil { - k.value = defaultVal[0].Format(format) - return defaultVal[0] - } - return val -} - -// MustTime always parses with RFC3339 format and returns value without error, -// it returns zero value if error occurs. -func (k *Key) MustTime(defaultVal ...time.Time) time.Time { - return k.MustTimeFormat(time.RFC3339, defaultVal...) -} - -// In always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) In(defaultVal string, candidates []string) string { - val := k.String() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InFloat64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. 
-func (k *Key) InFloat64(defaultVal float64, candidates []float64) float64 { - val := k.MustFloat64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt(defaultVal int, candidates []int) int { - val := k.MustInt() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InInt64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InInt64(defaultVal int64, candidates []int64) int64 { - val := k.MustInt64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint(defaultVal uint, candidates []uint) uint { - val := k.MustUint() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InUint64 always returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InUint64(defaultVal uint64, candidates []uint64) uint64 { - val := k.MustUint64() - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTimeFormat always parses with given format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTimeFormat(format string, defaultVal time.Time, candidates []time.Time) time.Time { - val := k.MustTimeFormat(format) - for _, cand := range candidates { - if val == cand { - return val - } - } - return defaultVal -} - -// InTime always parses with RFC3339 format and returns value without error, -// it returns default value if error occurs or doesn't fit into candidates. -func (k *Key) InTime(defaultVal time.Time, candidates []time.Time) time.Time { - return k.InTimeFormat(time.RFC3339, defaultVal, candidates) -} - -// RangeFloat64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeFloat64(defaultVal, min, max float64) float64 { - val := k.MustFloat64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt(defaultVal, min, max int) int { - val := k.MustInt() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeInt64 checks if value is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeInt64(defaultVal, min, max int64) int64 { - val := k.MustInt64() - if val < min || val > max { - return defaultVal - } - return val -} - -// RangeTimeFormat checks if value with given format is in given range inclusively, -// and returns default value if it's not. -func (k *Key) RangeTimeFormat(format string, defaultVal, min, max time.Time) time.Time { - val := k.MustTimeFormat(format) - if val.Unix() < min.Unix() || val.Unix() > max.Unix() { - return defaultVal - } - return val -} - -// RangeTime checks if value with RFC3339 format is in given range inclusively, -// and returns default value if it's not. 
-func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { - return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) -} - -// Strings returns list of string divided by given delimiter. -func (k *Key) Strings(delim string) []string { - str := k.String() - if len(str) == 0 { - return []string{} - } - - runes := []rune(str) - vals := make([]string, 0, 2) - var buf bytes.Buffer - escape := false - idx := 0 - for { - if escape { - escape = false - if runes[idx] != '\\' && !strings.HasPrefix(string(runes[idx:]), delim) { - buf.WriteRune('\\') - } - buf.WriteRune(runes[idx]) - } else { - if runes[idx] == '\\' { - escape = true - } else if strings.HasPrefix(string(runes[idx:]), delim) { - idx += len(delim) - 1 - vals = append(vals, strings.TrimSpace(buf.String())) - buf.Reset() - } else { - buf.WriteRune(runes[idx]) - } - } - idx += 1 - if idx == len(runes) { - break - } - } - - if buf.Len() > 0 { - vals = append(vals, strings.TrimSpace(buf.String())) - } - - return vals -} - -// StringsWithShadows returns list of string divided by given delimiter. -// Shadows will also be appended if any. -func (k *Key) StringsWithShadows(delim string) []string { - vals := k.ValueWithShadows() - results := make([]string, 0, len(vals)*2) - for i := range vals { - if len(vals) == 0 { - continue - } - - results = append(results, strings.Split(vals[i], delim)...) - } - - for i := range results { - results[i] = k.transformValue(strings.TrimSpace(results[i])) - } - return results -} - -// Float64s returns list of float64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Float64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), true, false) - return vals -} - -// Ints returns list of int divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Ints(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), true, false) - return vals -} - -// Int64s returns list of int64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Int64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), true, false) - return vals -} - -// Uints returns list of uint divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), true, false) - return vals -} - -// Uint64s returns list of uint64 divided by given delimiter. Any invalid input will be treated as zero value. -func (k *Key) Uint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), true, false) - return vals -} - -// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) TimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), true, false) - return vals -} - -// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. -// Any invalid input will be treated as zero value (0001-01-01 00:00:00 +0000 UTC). -func (k *Key) Times(delim string) []time.Time { - return k.TimesFormat(time.RFC3339, delim) -} - -// ValidFloat64s returns list of float64 divided by given delimiter. If some value is not float, then -// it will not be included to result list. 
-func (k *Key) ValidFloat64s(delim string) []float64 { - vals, _ := k.parseFloat64s(k.Strings(delim), false, false) - return vals -} - -// ValidInts returns list of int divided by given delimiter. If some value is not integer, then it will -// not be included to result list. -func (k *Key) ValidInts(delim string) []int { - vals, _ := k.parseInts(k.Strings(delim), false, false) - return vals -} - -// ValidInt64s returns list of int64 divided by given delimiter. If some value is not 64-bit integer, -// then it will not be included to result list. -func (k *Key) ValidInt64s(delim string) []int64 { - vals, _ := k.parseInt64s(k.Strings(delim), false, false) - return vals -} - -// ValidUints returns list of uint divided by given delimiter. If some value is not unsigned integer, -// then it will not be included to result list. -func (k *Key) ValidUints(delim string) []uint { - vals, _ := k.parseUints(k.Strings(delim), false, false) - return vals -} - -// ValidUint64s returns list of uint64 divided by given delimiter. If some value is not 64-bit unsigned -// integer, then it will not be included to result list. -func (k *Key) ValidUint64s(delim string) []uint64 { - vals, _ := k.parseUint64s(k.Strings(delim), false, false) - return vals -} - -// ValidTimesFormat parses with given format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimesFormat(format, delim string) []time.Time { - vals, _ := k.parseTimesFormat(format, k.Strings(delim), false, false) - return vals -} - -// ValidTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter. -func (k *Key) ValidTimes(delim string) []time.Time { - return k.ValidTimesFormat(time.RFC3339, delim) -} - -// StrictFloat64s returns list of float64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictFloat64s(delim string) ([]float64, error) { - return k.parseFloat64s(k.Strings(delim), false, true) -} - -// StrictInts returns list of int divided by given delimiter or error on first invalid input. -func (k *Key) StrictInts(delim string) ([]int, error) { - return k.parseInts(k.Strings(delim), false, true) -} - -// StrictInt64s returns list of int64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictInt64s(delim string) ([]int64, error) { - return k.parseInt64s(k.Strings(delim), false, true) -} - -// StrictUints returns list of uint divided by given delimiter or error on first invalid input. -func (k *Key) StrictUints(delim string) ([]uint, error) { - return k.parseUints(k.Strings(delim), false, true) -} - -// StrictUint64s returns list of uint64 divided by given delimiter or error on first invalid input. -func (k *Key) StrictUint64s(delim string) ([]uint64, error) { - return k.parseUint64s(k.Strings(delim), false, true) -} - -// StrictTimesFormat parses with given format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimesFormat(format, delim string) ([]time.Time, error) { - return k.parseTimesFormat(format, k.Strings(delim), false, true) -} - -// StrictTimes parses with RFC3339 format and returns list of time.Time divided by given delimiter -// or error on first invalid input. -func (k *Key) StrictTimes(delim string) ([]time.Time, error) { - return k.StrictTimesFormat(time.RFC3339, delim) -} - -// parseFloat64s transforms strings to float64s. 
-func (k *Key) parseFloat64s(strs []string, addInvalid, returnOnInvalid bool) ([]float64, error) { - vals := make([]float64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseFloat(str, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInts transforms strings to ints. -func (k *Key) parseInts(strs []string, addInvalid, returnOnInvalid bool) ([]int, error) { - vals := make([]int, 0, len(strs)) - for _, str := range strs { - val, err := strconv.Atoi(str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseInt64s transforms strings to int64s. -func (k *Key) parseInt64s(strs []string, addInvalid, returnOnInvalid bool) ([]int64, error) { - vals := make([]int64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseInt(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseUints transforms strings to uints. -func (k *Key) parseUints(strs []string, addInvalid, returnOnInvalid bool) ([]uint, error) { - vals := make([]uint, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 0) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, uint(val)) - } - } - return vals, nil -} - -// parseUint64s transforms strings to uint64s. -func (k *Key) parseUint64s(strs []string, addInvalid, returnOnInvalid bool) ([]uint64, error) { - vals := make([]uint64, 0, len(strs)) - for _, str := range strs { - val, err := strconv.ParseUint(str, 10, 64) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// parseTimesFormat transforms strings to times in given format. -func (k *Key) parseTimesFormat(format string, strs []string, addInvalid, returnOnInvalid bool) ([]time.Time, error) { - vals := make([]time.Time, 0, len(strs)) - for _, str := range strs { - val, err := time.Parse(format, str) - if err != nil && returnOnInvalid { - return nil, err - } - if err == nil || addInvalid { - vals = append(vals, val) - } - } - return vals, nil -} - -// SetValue changes key value. -func (k *Key) SetValue(v string) { - if k.s.f.BlockMode { - k.s.f.lock.Lock() - defer k.s.f.lock.Unlock() - } - - k.value = v - k.s.keysHash[k.name] = v -} diff --git a/vendor/github.com/go-ini/ini/parser.go b/vendor/github.com/go-ini/ini/parser.go deleted file mode 100644 index db3af8f0044..00000000000 --- a/vendor/github.com/go-ini/ini/parser.go +++ /dev/null @@ -1,401 +0,0 @@ -// Copyright 2015 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. 
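// Illustrative sketch of the Key accessors deleted in the key.go hunk above. The
// section, key names and values are invented; the methods (MustInt, MustBool,
// MustDuration, In, RangeInt, Strings, StrictInts) are the ones defined in the
// removed file.
package main

import (
	"fmt"
	"time"

	"github.com/go-ini/ini"
)

func main() {
	cfg, err := ini.Load([]byte(`
[server]
port    = 8080
debug   = true
timeout = 30s
hosts   = a.example.com, b.example.com
`))
	if err != nil {
		panic(err)
	}

	s := cfg.Section("server")

	// Must* accessors swallow parse errors and fall back to the supplied default.
	fmt.Println(s.Key("port").MustInt(80))
	fmt.Println(s.Key("debug").MustBool(false))
	fmt.Println(s.Key("timeout").MustDuration(10 * time.Second))

	// In constrains the value to a candidate set ("mode" is absent, so the default wins);
	// RangeInt checks an inclusive range.
	fmt.Println(s.Key("mode").In("plain", []string{"plain", "tls"}))
	fmt.Println(s.Key("port").RangeInt(8080, 1, 65535))

	// Strings splits on a delimiter; StrictInts would instead return an error on the
	// first value that fails to parse.
	fmt.Println(s.Key("hosts").Strings(","))
}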
- -package ini - -import ( - "bufio" - "bytes" - "fmt" - "io" - "strconv" - "strings" - "unicode" -) - -type tokenType int - -const ( - _TOKEN_INVALID tokenType = iota - _TOKEN_COMMENT - _TOKEN_SECTION - _TOKEN_KEY -) - -type parser struct { - buf *bufio.Reader - isEOF bool - count int - comment *bytes.Buffer -} - -func newParser(r io.Reader) *parser { - return &parser{ - buf: bufio.NewReader(r), - count: 1, - comment: &bytes.Buffer{}, - } -} - -// BOM handles header of UTF-8, UTF-16 LE and UTF-16 BE's BOM format. -// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding -func (p *parser) BOM() error { - mask, err := p.buf.Peek(2) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 2 { - return nil - } - - switch { - case mask[0] == 254 && mask[1] == 255: - fallthrough - case mask[0] == 255 && mask[1] == 254: - p.buf.Read(mask) - case mask[0] == 239 && mask[1] == 187: - mask, err := p.buf.Peek(3) - if err != nil && err != io.EOF { - return err - } else if len(mask) < 3 { - return nil - } - if mask[2] == 191 { - p.buf.Read(mask) - } - } - return nil -} - -func (p *parser) readUntil(delim byte) ([]byte, error) { - data, err := p.buf.ReadBytes(delim) - if err != nil { - if err == io.EOF { - p.isEOF = true - } else { - return nil, err - } - } - return data, nil -} - -func cleanComment(in []byte) ([]byte, bool) { - i := bytes.IndexAny(in, "#;") - if i == -1 { - return nil, false - } - return in[i:], true -} - -func readKeyName(in []byte) (string, int, error) { - line := string(in) - - // Check if key name surrounded by quotes. - var keyQuote string - if line[0] == '"' { - if len(line) > 6 && string(line[0:3]) == `"""` { - keyQuote = `"""` - } else { - keyQuote = `"` - } - } else if line[0] == '`' { - keyQuote = "`" - } - - // Get out key name - endIdx := -1 - if len(keyQuote) > 0 { - startIdx := len(keyQuote) - // FIXME: fail case -> """"""name"""=value - pos := strings.Index(line[startIdx:], keyQuote) - if pos == -1 { - return "", -1, fmt.Errorf("missing closing key quote: %s", line) - } - pos += startIdx - - // Find key-value delimiter - i := strings.IndexAny(line[pos+startIdx:], "=:") - if i < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - endIdx = pos + i - return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil - } - - endIdx = strings.IndexAny(line, "=:") - if endIdx < 0 { - return "", -1, ErrDelimiterNotFound{line} - } - return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil -} - -func (p *parser) readMultilines(line, val, valQuote string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := string(data) - - pos := strings.LastIndex(next, valQuote) - if pos > -1 { - val += next[:pos] - - comment, has := cleanComment([]byte(next[pos:])) - if has { - p.comment.Write(bytes.TrimSpace(comment)) - } - break - } - val += next - if p.isEOF { - return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next) - } - } - return val, nil -} - -func (p *parser) readContinuationLines(val string) (string, error) { - for { - data, err := p.readUntil('\n') - if err != nil { - return "", err - } - next := strings.TrimSpace(string(data)) - - if len(next) == 0 { - break - } - val += next - if val[len(val)-1] != '\\' { - break - } - val = val[:len(val)-1] - } - return val, nil -} - -// hasSurroundedQuote check if and only if the first and last characters -// are quotes \" or \'. -// It returns false if any other parts also contain same kind of quotes. 
-func hasSurroundedQuote(in string, quote byte) bool { - return len(in) >= 2 && in[0] == quote && in[len(in)-1] == quote && - strings.IndexByte(in[1:], quote) == len(in)-2 -} - -func (p *parser) readValue(in []byte, - ignoreContinuation, ignoreInlineComment, unescapeValueDoubleQuotes, unescapeValueCommentSymbols bool) (string, error) { - - line := strings.TrimLeftFunc(string(in), unicode.IsSpace) - if len(line) == 0 { - return "", nil - } - - var valQuote string - if len(line) > 3 && string(line[0:3]) == `"""` { - valQuote = `"""` - } else if line[0] == '`' { - valQuote = "`" - } else if unescapeValueDoubleQuotes && line[0] == '"' { - valQuote = `"` - } - - if len(valQuote) > 0 { - startIdx := len(valQuote) - pos := strings.LastIndex(line[startIdx:], valQuote) - // Check for multi-line value - if pos == -1 { - return p.readMultilines(line, line[startIdx:], valQuote) - } - - if unescapeValueDoubleQuotes && valQuote == `"` { - return strings.Replace(line[startIdx:pos+startIdx], `\"`, `"`, -1), nil - } - return line[startIdx : pos+startIdx], nil - } - - // Won't be able to reach here if value only contains whitespace - line = strings.TrimSpace(line) - - // Check continuation lines when desired - if !ignoreContinuation && line[len(line)-1] == '\\' { - return p.readContinuationLines(line[:len(line)-1]) - } - - // Check if ignore inline comment - if !ignoreInlineComment { - i := strings.IndexAny(line, "#;") - if i > -1 { - p.comment.WriteString(line[i:]) - line = strings.TrimSpace(line[:i]) - } - } - - // Trim single and double quotes - if hasSurroundedQuote(line, '\'') || - hasSurroundedQuote(line, '"') { - line = line[1 : len(line)-1] - } else if len(valQuote) == 0 && unescapeValueCommentSymbols { - if strings.Contains(line, `\;`) { - line = strings.Replace(line, `\;`, ";", -1) - } - if strings.Contains(line, `\#`) { - line = strings.Replace(line, `\#`, "#", -1) - } - } - return line, nil -} - -// parse parses data through an io.Reader. -func (f *File) parse(reader io.Reader) (err error) { - p := newParser(reader) - if err = p.BOM(); err != nil { - return fmt.Errorf("BOM: %v", err) - } - - // Ignore error because default section name is never empty string. - name := DEFAULT_SECTION - if f.options.Insensitive { - name = strings.ToLower(DEFAULT_SECTION) - } - section, _ := f.NewSection(name) - - // This "last" is not strictly equivalent to "previous one" if current key is not the first nested key - var isLastValueEmpty bool - var lastRegularKey *Key - - var line []byte - var inUnparseableSection bool - for !p.isEOF { - line, err = p.readUntil('\n') - if err != nil { - return err - } - - if f.options.AllowNestedValues && - isLastValueEmpty && len(line) > 0 { - if line[0] == ' ' || line[0] == '\t' { - lastRegularKey.addNestedValue(string(bytes.TrimSpace(line))) - continue - } - } - - line = bytes.TrimLeftFunc(line, unicode.IsSpace) - if len(line) == 0 { - continue - } - - // Comments - if line[0] == '#' || line[0] == ';' { - // Note: we do not care ending line break, - // it is needed for adding second line, - // so just clean it once at the end when set to value. 
- p.comment.Write(line) - continue - } - - // Section - if line[0] == '[' { - // Read to the next ']' (TODO: support quoted strings) - // TODO(unknwon): use LastIndexByte when stop supporting Go1.4 - closeIdx := bytes.LastIndex(line, []byte("]")) - if closeIdx == -1 { - return fmt.Errorf("unclosed section: %s", line) - } - - name := string(line[1:closeIdx]) - section, err = f.NewSection(name) - if err != nil { - return err - } - - comment, has := cleanComment(line[closeIdx+1:]) - if has { - p.comment.Write(comment) - } - - section.Comment = strings.TrimSpace(p.comment.String()) - - // Reset aotu-counter and comments - p.comment.Reset() - p.count = 1 - - inUnparseableSection = false - for i := range f.options.UnparseableSections { - if f.options.UnparseableSections[i] == name || - (f.options.Insensitive && strings.ToLower(f.options.UnparseableSections[i]) == strings.ToLower(name)) { - inUnparseableSection = true - continue - } - } - continue - } - - if inUnparseableSection { - section.isRawSection = true - section.rawBody += string(line) - continue - } - - kname, offset, err := readKeyName(line) - if err != nil { - // Treat as boolean key when desired, and whole line is key name. - if IsErrDelimiterNotFound(err) && f.options.AllowBooleanKeys { - kname, err := p.readValue(line, - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols) - if err != nil { - return err - } - key, err := section.NewBooleanKey(kname) - if err != nil { - return err - } - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - continue - } - return err - } - - // Auto increment. - isAutoIncr := false - if kname == "-" { - isAutoIncr = true - kname = "#" + strconv.Itoa(p.count) - p.count++ - } - - value, err := p.readValue(line[offset:], - f.options.IgnoreContinuation, - f.options.IgnoreInlineComment, - f.options.UnescapeValueDoubleQuotes, - f.options.UnescapeValueCommentSymbols) - if err != nil { - return err - } - isLastValueEmpty = len(value) == 0 - - key, err := section.NewKey(kname, value) - if err != nil { - return err - } - key.isAutoIncrement = isAutoIncr - key.Comment = strings.TrimSpace(p.comment.String()) - p.comment.Reset() - lastRegularKey = key - } - return nil -} diff --git a/vendor/github.com/go-ini/ini/section.go b/vendor/github.com/go-ini/ini/section.go deleted file mode 100644 index d8a40261920..00000000000 --- a/vendor/github.com/go-ini/ini/section.go +++ /dev/null @@ -1,257 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "errors" - "fmt" - "strings" -) - -// Section represents a config section. 
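// Illustrative sketch of the parser toggles exercised by the deleted parser.go above.
// LoadSources and LoadOptions appear in the removed ini.go; AllowBooleanKeys and
// IgnoreInlineComment are the f.options fields the parser reads, assumed here to be
// exported LoadOptions fields as in upstream go-ini. The config content is invented.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

func main() {
	data := []byte(`
[features]
; a line without a key-value delimiter becomes a boolean key when AllowBooleanKeys is set
enable-cache
threshold = 10 ; this trailing comment is kept when IgnoreInlineComment is set
`)

	cfg, err := ini.LoadSources(ini.LoadOptions{
		AllowBooleanKeys:    true,
		IgnoreInlineComment: true,
	}, data)
	if err != nil {
		panic(err)
	}

	sec := cfg.Section("features")
	fmt.Println(sec.Key("enable-cache").MustBool(false)) // boolean key, stored as "true"
	fmt.Println(sec.Key("threshold").Value())            // raw value including the "; ..." tail
}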
-type Section struct { - f *File - Comment string - name string - keys map[string]*Key - keyList []string - keysHash map[string]string - - isRawSection bool - rawBody string -} - -func newSection(f *File, name string) *Section { - return &Section{ - f: f, - name: name, - keys: make(map[string]*Key), - keyList: make([]string, 0, 10), - keysHash: make(map[string]string), - } -} - -// Name returns name of Section. -func (s *Section) Name() string { - return s.name -} - -// Body returns rawBody of Section if the section was marked as unparseable. -// It still follows the other rules of the INI format surrounding leading/trailing whitespace. -func (s *Section) Body() string { - return strings.TrimSpace(s.rawBody) -} - -// SetBody updates body content only if section is raw. -func (s *Section) SetBody(body string) { - if !s.isRawSection { - return - } - s.rawBody = body -} - -// NewKey creates a new key to given section. -func (s *Section) NewKey(name, val string) (*Key, error) { - if len(name) == 0 { - return nil, errors.New("error creating new key: empty key name") - } else if s.f.options.Insensitive { - name = strings.ToLower(name) - } - - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - if inSlice(name, s.keyList) { - if s.f.options.AllowShadows { - if err := s.keys[name].addShadow(val); err != nil { - return nil, err - } - } else { - s.keys[name].value = val - } - return s.keys[name], nil - } - - s.keyList = append(s.keyList, name) - s.keys[name] = newKey(s, name, val) - s.keysHash[name] = val - return s.keys[name], nil -} - -// NewBooleanKey creates a new boolean type key to given section. -func (s *Section) NewBooleanKey(name string) (*Key, error) { - key, err := s.NewKey(name, "true") - if err != nil { - return nil, err - } - - key.isBooleanType = true - return key, nil -} - -// GetKey returns key in section by given name. -func (s *Section) GetKey(name string) (*Key, error) { - // FIXME: change to section level lock? - if s.f.BlockMode { - s.f.lock.RLock() - } - if s.f.options.Insensitive { - name = strings.ToLower(name) - } - key := s.keys[name] - if s.f.BlockMode { - s.f.lock.RUnlock() - } - - if key == nil { - // Check if it is a child-section. - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - return sec.GetKey(name) - } else { - break - } - } - return nil, fmt.Errorf("error when getting key of section '%s': key '%s' not exists", s.name, name) - } - return key, nil -} - -// HasKey returns true if section contains a key with given name. -func (s *Section) HasKey(name string) bool { - key, _ := s.GetKey(name) - return key != nil -} - -// Haskey is a backwards-compatible name for HasKey. -// TODO: delete me in v2 -func (s *Section) Haskey(name string) bool { - return s.HasKey(name) -} - -// HasValue returns true if section contains given raw value. -func (s *Section) HasValue(value string) bool { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - for _, k := range s.keys { - if value == k.value { - return true - } - } - return false -} - -// Key assumes named Key exists in section and returns a zero-value when not. -func (s *Section) Key(name string) *Key { - key, err := s.GetKey(name) - if err != nil { - // It's OK here because the only possible error is empty key name, - // but if it's empty, this piece of code won't be executed. 
- key, _ = s.NewKey(name, "") - return key - } - return key -} - -// Keys returns list of keys of section. -func (s *Section) Keys() []*Key { - keys := make([]*Key, len(s.keyList)) - for i := range s.keyList { - keys[i] = s.Key(s.keyList[i]) - } - return keys -} - -// ParentKeys returns list of keys of parent section. -func (s *Section) ParentKeys() []*Key { - var parentKeys []*Key - sname := s.name - for { - if i := strings.LastIndex(sname, "."); i > -1 { - sname = sname[:i] - sec, err := s.f.GetSection(sname) - if err != nil { - continue - } - parentKeys = append(parentKeys, sec.Keys()...) - } else { - break - } - - } - return parentKeys -} - -// KeyStrings returns list of key names of section. -func (s *Section) KeyStrings() []string { - list := make([]string, len(s.keyList)) - copy(list, s.keyList) - return list -} - -// KeysHash returns keys hash consisting of names and values. -func (s *Section) KeysHash() map[string]string { - if s.f.BlockMode { - s.f.lock.RLock() - defer s.f.lock.RUnlock() - } - - hash := map[string]string{} - for key, value := range s.keysHash { - hash[key] = value - } - return hash -} - -// DeleteKey deletes a key from section. -func (s *Section) DeleteKey(name string) { - if s.f.BlockMode { - s.f.lock.Lock() - defer s.f.lock.Unlock() - } - - for i, k := range s.keyList { - if k == name { - s.keyList = append(s.keyList[:i], s.keyList[i+1:]...) - delete(s.keys, name) - return - } - } -} - -// ChildSections returns a list of child sections of current section. -// For example, "[parent.child1]" and "[parent.child12]" are child sections -// of section "[parent]". -func (s *Section) ChildSections() []*Section { - prefix := s.name + "." - children := make([]*Section, 0, 3) - for _, name := range s.f.sectionList { - if strings.HasPrefix(name, prefix) { - children = append(children, s.f.sections[name]) - } - } - return children -} diff --git a/vendor/github.com/go-ini/ini/struct.go b/vendor/github.com/go-ini/ini/struct.go deleted file mode 100644 index 9719dc6985a..00000000000 --- a/vendor/github.com/go-ini/ini/struct.go +++ /dev/null @@ -1,512 +0,0 @@ -// Copyright 2014 Unknwon -// -// Licensed under the Apache License, Version 2.0 (the "License"): you may -// not use this file except in compliance with the License. You may obtain -// a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -// License for the specific language governing permissions and limitations -// under the License. - -package ini - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "strings" - "time" - "unicode" -) - -// NameMapper represents a ini tag name mapper. -type NameMapper func(string) string - -// Built-in name getters. -var ( - // AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE. - AllCapsUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - } - newstr = append(newstr, unicode.ToUpper(chr)) - } - return string(newstr) - } - // TitleUnderscore converts to format title_underscore. 
- TitleUnderscore NameMapper = func(raw string) string { - newstr := make([]rune, 0, len(raw)) - for i, chr := range raw { - if isUpper := 'A' <= chr && chr <= 'Z'; isUpper { - if i > 0 { - newstr = append(newstr, '_') - } - chr -= ('A' - 'a') - } - newstr = append(newstr, chr) - } - return string(newstr) - } -) - -func (s *Section) parseFieldName(raw, actual string) string { - if len(actual) > 0 { - return actual - } - if s.f.NameMapper != nil { - return s.f.NameMapper(raw) - } - return raw -} - -func parseDelim(actual string) string { - if len(actual) > 0 { - return actual - } - return "," -} - -var reflectTime = reflect.TypeOf(time.Now()).Kind() - -// setSliceWithProperType sets proper values to slice based on its type. -func setSliceWithProperType(key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - var strs []string - if allowShadow { - strs = key.StringsWithShadows(delim) - } else { - strs = key.Strings(delim) - } - - numVals := len(strs) - if numVals == 0 { - return nil - } - - var vals interface{} - var err error - - sliceOf := field.Type().Elem().Kind() - switch sliceOf { - case reflect.String: - vals = strs - case reflect.Int: - vals, err = key.parseInts(strs, true, false) - case reflect.Int64: - vals, err = key.parseInt64s(strs, true, false) - case reflect.Uint: - vals, err = key.parseUints(strs, true, false) - case reflect.Uint64: - vals, err = key.parseUint64s(strs, true, false) - case reflect.Float64: - vals, err = key.parseFloat64s(strs, true, false) - case reflectTime: - vals, err = key.parseTimesFormat(time.RFC3339, strs, true, false) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - if err != nil && isStrict { - return err - } - - slice := reflect.MakeSlice(field.Type(), numVals, numVals) - for i := 0; i < numVals; i++ { - switch sliceOf { - case reflect.String: - slice.Index(i).Set(reflect.ValueOf(vals.([]string)[i])) - case reflect.Int: - slice.Index(i).Set(reflect.ValueOf(vals.([]int)[i])) - case reflect.Int64: - slice.Index(i).Set(reflect.ValueOf(vals.([]int64)[i])) - case reflect.Uint: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint)[i])) - case reflect.Uint64: - slice.Index(i).Set(reflect.ValueOf(vals.([]uint64)[i])) - case reflect.Float64: - slice.Index(i).Set(reflect.ValueOf(vals.([]float64)[i])) - case reflectTime: - slice.Index(i).Set(reflect.ValueOf(vals.([]time.Time)[i])) - } - } - field.Set(slice) - return nil -} - -func wrapStrictError(err error, isStrict bool) error { - if isStrict { - return err - } - return nil -} - -// setWithProperType sets proper value to field based on its type, -// but it does not return error for failing parsing, -// because we want to use default value that is already assigned to strcut. 
-func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string, allowShadow, isStrict bool) error { - switch t.Kind() { - case reflect.String: - if len(key.String()) == 0 { - return nil - } - field.SetString(key.String()) - case reflect.Bool: - boolVal, err := key.Bool() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetBool(boolVal) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int64(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - intVal, err := key.Int64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetInt(intVal) - // byte is an alias for uint8, so supporting uint8 breaks support for byte - case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64: - durationVal, err := key.Duration() - // Skip zero value - if err == nil && int(durationVal) > 0 { - field.Set(reflect.ValueOf(durationVal)) - return nil - } - - uintVal, err := key.Uint64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetUint(uintVal) - - case reflect.Float32, reflect.Float64: - floatVal, err := key.Float64() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.SetFloat(floatVal) - case reflectTime: - timeVal, err := key.Time() - if err != nil { - return wrapStrictError(err, isStrict) - } - field.Set(reflect.ValueOf(timeVal)) - case reflect.Slice: - return setSliceWithProperType(key, field, delim, allowShadow, isStrict) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -func parseTagOptions(tag string) (rawName string, omitEmpty bool, allowShadow bool) { - opts := strings.SplitN(tag, ",", 3) - rawName = opts[0] - if len(opts) > 1 { - omitEmpty = opts[1] == "omitempty" - } - if len(opts) > 2 { - allowShadow = opts[2] == "allowshadow" - } - return rawName, omitEmpty, allowShadow -} - -func (s *Section) mapTo(val reflect.Value, isStrict bool) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - rawName, _, allowShadow := parseTagOptions(tag) - fieldName := s.parseFieldName(tpField.Name, rawName) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous - isStruct := tpField.Type.Kind() == reflect.Struct - if isAnonymous { - field.Set(reflect.New(tpField.Type.Elem())) - } - - if isAnonymous || isStruct { - if sec, err := s.f.GetSection(fieldName); err == nil { - if err = sec.mapTo(field, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - continue - } - } - - if key, err := s.GetKey(fieldName); err == nil { - delim := parseDelim(tpField.Tag.Get("delim")) - if err = setWithProperType(tpField.Type, key, field, delim, allowShadow, isStrict); err != nil { - return fmt.Errorf("error mapping field(%s): %v", fieldName, err) - } - } - } - return nil -} - -// MapTo maps section to given struct. 
-func (s *Section) MapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, false) -} - -// MapTo maps section to given struct in strict mode, -// which returns all possible error including value parsing error. -func (s *Section) StrictMapTo(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot map to non-pointer struct") - } - - return s.mapTo(val, true) -} - -// MapTo maps file to given struct. -func (f *File) MapTo(v interface{}) error { - return f.Section("").MapTo(v) -} - -// MapTo maps file to given struct in strict mode, -// which returns all possible error including value parsing error. -func (f *File) StrictMapTo(v interface{}) error { - return f.Section("").StrictMapTo(v) -} - -// MapTo maps data sources to given struct with name mapper. -func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.MapTo(v) -} - -// StrictMapToWithMapper maps data sources to given struct with name mapper in strict mode, -// which returns all possible error including value parsing error. -func StrictMapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error { - cfg, err := Load(source, others...) - if err != nil { - return err - } - cfg.NameMapper = mapper - return cfg.StrictMapTo(v) -} - -// MapTo maps data sources to given struct. -func MapTo(v, source interface{}, others ...interface{}) error { - return MapToWithMapper(v, nil, source, others...) -} - -// StrictMapTo maps data sources to given struct in strict mode, -// which returns all possible error including value parsing error. -func StrictMapTo(v, source interface{}, others ...interface{}) error { - return StrictMapToWithMapper(v, nil, source, others...) -} - -// reflectSliceWithProperType does the opposite thing as setSliceWithProperType. -func reflectSliceWithProperType(key *Key, field reflect.Value, delim string) error { - slice := field.Slice(0, field.Len()) - if field.Len() == 0 { - return nil - } - - var buf bytes.Buffer - sliceOf := field.Type().Elem().Kind() - for i := 0; i < field.Len(); i++ { - switch sliceOf { - case reflect.String: - buf.WriteString(slice.Index(i).String()) - case reflect.Int, reflect.Int64: - buf.WriteString(fmt.Sprint(slice.Index(i).Int())) - case reflect.Uint, reflect.Uint64: - buf.WriteString(fmt.Sprint(slice.Index(i).Uint())) - case reflect.Float64: - buf.WriteString(fmt.Sprint(slice.Index(i).Float())) - case reflectTime: - buf.WriteString(slice.Index(i).Interface().(time.Time).Format(time.RFC3339)) - default: - return fmt.Errorf("unsupported type '[]%s'", sliceOf) - } - buf.WriteString(delim) - } - key.SetValue(buf.String()[:buf.Len()-1]) - return nil -} - -// reflectWithProperType does the opposite thing as setWithProperType. 
-func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error { - switch t.Kind() { - case reflect.String: - key.SetValue(field.String()) - case reflect.Bool: - key.SetValue(fmt.Sprint(field.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - key.SetValue(fmt.Sprint(field.Int())) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: - key.SetValue(fmt.Sprint(field.Uint())) - case reflect.Float32, reflect.Float64: - key.SetValue(fmt.Sprint(field.Float())) - case reflectTime: - key.SetValue(fmt.Sprint(field.Interface().(time.Time).Format(time.RFC3339))) - case reflect.Slice: - return reflectSliceWithProperType(key, field, delim) - default: - return fmt.Errorf("unsupported type '%s'", t) - } - return nil -} - -// CR: copied from encoding/json/encode.go with modifications of time.Time support. -// TODO: add more test coverage. -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflectTime: - t, ok := v.Interface().(time.Time) - return ok && t.IsZero() - } - return false -} - -func (s *Section) reflectFrom(val reflect.Value) error { - if val.Kind() == reflect.Ptr { - val = val.Elem() - } - typ := val.Type() - - for i := 0; i < typ.NumField(); i++ { - field := val.Field(i) - tpField := typ.Field(i) - - tag := tpField.Tag.Get("ini") - if tag == "-" { - continue - } - - opts := strings.SplitN(tag, ",", 2) - if len(opts) == 2 && opts[1] == "omitempty" && isEmptyValue(field) { - continue - } - - fieldName := s.parseFieldName(tpField.Name, opts[0]) - if len(fieldName) == 0 || !field.CanSet() { - continue - } - - if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) || - (tpField.Type.Kind() == reflect.Struct && tpField.Type.Name() != "Time") { - // Note: The only error here is section doesn't exist. - sec, err := s.f.GetSection(fieldName) - if err != nil { - // Note: fieldName can never be empty here, ignore error. - sec, _ = s.f.NewSection(fieldName) - } - - // Add comment from comment tag - if len(sec.Comment) == 0 { - sec.Comment = tpField.Tag.Get("comment") - } - - if err = sec.reflectFrom(field); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - continue - } - - // Note: Same reason as secion. - key, err := s.GetKey(fieldName) - if err != nil { - key, _ = s.NewKey(fieldName, "") - } - - // Add comment from comment tag - if len(key.Comment) == 0 { - key.Comment = tpField.Tag.Get("comment") - } - - if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil { - return fmt.Errorf("error reflecting field (%s): %v", fieldName, err) - } - - } - return nil -} - -// ReflectFrom reflects secion from given struct. 
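// Illustrative sketch of the struct mapping removed in the struct.go hunk: MapTo fills
// a struct from a section using the `ini`/`delim`/`omitempty` tags parsed above, and
// ReflectFrom goes the other way. ini.Empty comes from file.go (not shown in this
// diff); the Config type and values are invented.
package main

import (
	"fmt"

	"github.com/go-ini/ini"
)

type Config struct {
	Name    string   `ini:"name"`
	Port    int      `ini:"port"`
	Debug   bool     `ini:"debug,omitempty"`
	Mirrors []string `ini:"mirrors" delim:","`
}

func main() {
	cfg, err := ini.Load([]byte("name = demo\nport = 9000\nmirrors = a,b,c\n"))
	if err != nil {
		panic(err)
	}

	// Map the default ("") section onto the struct.
	var c Config
	if err := cfg.MapTo(&c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c)

	// And back: reflect the struct into a fresh file; Debug is false and tagged
	// omitempty, so it is skipped.
	out := ini.Empty()
	if err := ini.ReflectFrom(out, &c); err != nil {
		panic(err)
	}
	fmt.Println(out.Section("").Key("mirrors").Value()) // "a,b,c"
}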
-func (s *Section) ReflectFrom(v interface{}) error { - typ := reflect.TypeOf(v) - val := reflect.ValueOf(v) - if typ.Kind() == reflect.Ptr { - typ = typ.Elem() - val = val.Elem() - } else { - return errors.New("cannot reflect from non-pointer struct") - } - - return s.reflectFrom(val) -} - -// ReflectFrom reflects file from given struct. -func (f *File) ReflectFrom(v interface{}) error { - return f.Section("").ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct with name mapper. -func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error { - cfg.NameMapper = mapper - return cfg.ReflectFrom(v) -} - -// ReflectFrom reflects data sources from given struct. -func ReflectFrom(cfg *File, v interface{}) error { - return ReflectFromWithMapper(cfg, v, nil) -} diff --git a/vendor/github.com/go-openapi/jsonpointer/.travis.yml b/vendor/github.com/go-openapi/jsonpointer/.travis.yml index d0f383a26ba..2ee3ab97581 100644 --- a/vendor/github.com/go-openapi/jsonpointer/.travis.yml +++ b/vendor/github.com/go-openapi/jsonpointer/.travis.yml @@ -1,6 +1,8 @@ language: go go: -- 1.7 +- "1.8" +- "1.9" +- "1.10" install: - go get -u github.com/stretchr/testify/assert - go get -u github.com/go-openapi/swag diff --git a/vendor/github.com/go-openapi/jsonreference/.travis.yml b/vendor/github.com/go-openapi/jsonreference/.travis.yml index 5b31a1b3e1a..7a261a651ed 100644 --- a/vendor/github.com/go-openapi/jsonreference/.travis.yml +++ b/vendor/github.com/go-openapi/jsonreference/.travis.yml @@ -1,6 +1,8 @@ language: go go: -- 1.7 +- "1.8" +- "1.9" +- "1.10" install: - go get -u github.com/stretchr/testify/assert - go get -u github.com/PuerkitoBio/purell diff --git a/vendor/github.com/go-openapi/spec/bindata.go b/vendor/github.com/go-openapi/spec/bindata.go index 9afb5df194e..693917a07bd 100644 --- a/vendor/github.com/go-openapi/spec/bindata.go +++ b/vendor/github.com/go-openapi/spec/bindata.go @@ -69,7 +69,7 @@ func (fi bindataFileInfo) Sys() interface{} { return nil } -var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xc4\x57\x3b\x6f\xdb\x3e\x10\xdf\xf3\x29\x08\x26\x63\xf2\x97\xff\x40\x27\x6f\x45\xbb\x18\x68\xd1\x0c\xdd\x0c\x0f\xb4\x75\xb2\x19\x50\xa4\x42\x51\x81\x0d\x43\xdf\xbd\xa0\xa8\x07\x29\x91\x92\x2d\xbb\x8d\x97\x28\xbc\xd7\xef\x8e\xf7\xe2\xf9\x01\x21\x84\x30\x8d\xf1\x12\xe1\x83\x52\xd9\x32\x8a\xde\x72\xc1\x5f\xf2\xdd\x01\x52\xf2\x9f\x90\xfb\x28\x96\x24\x51\x2f\x8b\x2f\x91\x39\x7b\xc4\xcf\x46\xe8\xc9\xfc\x3f\x43\x32\x86\x7c\x27\x69\xa6\xa8\xe0\x5a\xfa\x9b\x90\x80\x0c\x0b\x4a\x41\x91\x5a\x45\xc7\x9d\x50\x4e\x35\x73\x8e\x97\xc8\x20\xae\x08\x86\xed\xab\x94\xe4\xe4\x10\x2a\xa2\x3a\x65\xa0\x95\x93\x8a\xfc\xec\x12\x53\xca\x57\x0a\x52\xad\xef\xff\x1e\x89\xd6\xe7\x67\x84\x9f\x24\x24\x5a\xc5\x23\x46\x65\xcb\x54\x76\xfc\x38\x13\x39\x55\xf4\x03\x56\x5c\xc1\x1e\x64\x18\x04\xad\x19\x86\x30\x68\x5a\xa4\x78\x89\x16\x97\xe8\xff\x0e\x09\x29\x98\x5a\x0c\xed\x10\xc6\x7e\x69\xa8\x6b\x07\x76\x64\x45\x2e\xea\x63\x45\xe5\xb3\x66\x8e\x8d\x4e\x0d\x01\x95\x68\xe3\x85\x91\xd3\x34\x63\xf0\xfb\x94\x41\x3e\x34\x0d\xbc\x72\x60\xdd\x46\x1a\xe1\xad\x10\x0c\x08\xd7\x9f\xad\xe3\x08\xf3\x82\x31\xf3\x37\xdd\x9a\x13\xb1\x7d\x83\x9d\xd2\x5f\xb9\x92\x94\xef\x71\xc8\x7e\x45\x9d\x73\xcf\xd6\x65\x36\x7c\x8d\xa9\xf2\xf2\x94\x28\x38\x7d\x2f\xa0\xa1\x2a\x59\x40\x07\xf3\xc1\x02\xdb\xda\x68\x1c\x33\xa7\x99\x14\x19\x48\x45\x7b\xd1\x33\x45\x17\xf0\xa6\x46\xd9\x03\x92\x08\x99\x12\x7d\x57\xb8\x90\x14\x7b\x63\xd5\x15\xe5\xbd\x35\x2b\xaa\x18\x4c\xea\xf5\x8a\xba\xf5\x3e\x4b\x41\x93\xa5\x67\xfb\x38\x2d\x98\xa2\x19\x83\x2a\xf7\x03\x6a\x9b\x74\x0b\x56\x5e\x8f\x02\xc7\x1d\x2b\x72\xfa\x01\x3f\x5b\x16\xf7\xc6\x6d\xfb\xe4\x58\xb3\x8c\x1b\xf7\x0a\x77\x86\xa6\xb4\xb4\xf5\xe4\x92\xbb\xa0\x24\x84\xe5\x01\x84\xad\x13\x37\x21\x9c\xd2\x72\x0b\x42\x72\xfc\x01\x7c\xaf\x0e\xbd\x9e\x3b\xd5\xbc\x1c\x1f\xaf\xd6\xd0\xb6\x52\xb7\xdf\x12\xa5\x40\x4e\xe7\x68\xb0\x78\x24\xec\xe1\xe8\x0f\x26\x89\xe3\x0a\x0a\x61\x4d\x23\xe9\xf7\x70\x7e\x32\x3d\xdc\x39\xd6\xbf\xf3\x30\xd0\xfd\xf6\x55\xb3\x79\x27\x96\xfe\x6d\x82\x37\x73\xf6\x8f\x36\x3a\x03\xa4\x6d\x7d\x1c\x9e\x73\x35\xf6\x18\xbf\x15\x76\x4a\x8e\x2b\xcf\x00\xbf\x2a\x99\xae\x55\xe0\xcf\x25\x77\x68\xfc\x95\xba\x79\x75\x06\xcb\x5c\x77\x67\x69\xf1\xfb\x2c\xe1\xbd\xa0\x12\xe2\x31\x45\xf6\x30\x0f\x14\xc8\xab\x7f\x60\x4e\x27\xe0\x3f\xaf\x92\xd0\x6a\x8a\x82\xdb\xc0\xa4\xbb\x63\x65\x34\x0d\x28\xb0\x6b\x7c\x1e\x1e\xd3\x51\xc7\x6e\xf4\x33\x60\xc5\x90\x01\x8f\x81\xef\xee\x88\x68\x90\x69\x23\xb9\x8a\x2e\x69\x98\x7d\xa6\x91\x32\x1a\xc8\x6e\x9c\x13\x7f\x10\xea\xcd\xfd\x4e\xef\xa6\xb1\x25\xd9\xde\x22\x8d\xfa\x59\x63\xc5\x0d\x80\xf5\x28\xf1\xd6\xb9\x37\x9e\xa3\xee\xb5\x4c\xbe\x37\xe0\x55\xc6\x27\x82\x75\x49\xd0\xda\xe0\xb9\x1d\xca\xbf\x5b\xd4\xcf\xbf\x0b\x47\xac\x2d\x59\x07\xfe\x7a\x49\xc1\x61\xa6\x24\x17\x2a\xf0\xbe\x2e\xdb\x17\x7f\xa0\x3c\x7d\x4b\xf3\xba\xdb\xc3\xed\x06\xee\xdb\x5e\xd7\xdd\x42\x5c\x47\xb2\xb3\x68\x75\x8c\xf2\xe1\x4f\x00\x00\x00\xff\xff\x4e\x9b\x8d\xdf\x17\x11\x00\x00") +var _jsonschemaDraft04JSON = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xc4\x57\x3d\x6f\xdb\x3c\x10\xde\xf3\x2b\x08\x26\x63\xf2\x2a\x2f\xd0\xc9\x5b\xd1\x2e\x01\x5a\x34\x43\x37\x23\x03\x6d\x9d\x6c\x06\x14\xa9\x50\x54\x60\xc3\xd0\x7f\x2f\x28\x4a\x14\x29\x91\x92\x2d\xa7\x8d\x97\x28\xbc\xaf\xe7\x8e\xf7\xc5\xd3\x0d\x42\x08\x61\x9a\xe2\x15\xc2\x7b\xa5\x8a\x55\x92\xbc\x96\x82\x3f\x94\xdb\x3d\xe4\xe4\x3f\x21\x77\x49\x2a\x49\xa6\x1e\x1e\xbf\x24\xe6\xec\x16\xdf\x1b\xa1\x3b\xf3\xff\x02\xc9\x14\xca\xad\xa4\x85\xa2\x82\x6b\xe9\x6f\x42\x02\x32\x2c\x28\x07\x45\x5a\x15\x3d\x77\x46\x39\xd5\xcc\x25\x5e\x21\x83\xb8\x21\x18\xb6\xaf\x52\x92\xa3\x47\x68\x88\xea\x58\x80\x56\x4e\x1a\xf2\xbd\x4f\xcc\x29\x7f\x52\x90\x6b\x7d\xff\x0f\x48\xb4\x3d\x3f\x21\x7c\x27\x21\xd3\x2a\x6e\x31\xaa\x2d\x53\xdd\xf3\xe3\x42\x94\x54\xd1\x77\x78\xe2\x0a\x76\x20\xe3\x20\x68\xcb\x30\x86\x41\xf3\x2a\xc7\x2b\xf4\x78\x8e\xfe\xef\x90\x91\x8a\xa9\xc7\xb1\x1d\xc2\xd8\x2f\x0d\x75\xed\xc1\x4e\x9c\xc8\x25\x43\xac\xa8\xbe\xd7\xcc\xa9\xd1\xa9\x21\xa0\x1a\xbd\x04\x61\x94\x34\x2f\x18\xfc\x3e\x16\x50\x8e\x4d\x03\x6f\x1c\x58\xdb\x48\x23\xbc\x11\x82\x01\xe1\xfa\xd3\x3a\x8e\x30\xaf\x18\x33\x7f\xf3\x8d\x39\x11\x9b\x57\xd8\x2a\xfd\x55\x2a\x49\xf9\x0e\xc7\xec\x37\xd4\x25\xf7\xec\x5c\x66\xc7\xd7\x99\xaa\xcf\x4f\x89\x8a\xd3\xb7\x0a\x3a\xaa\x92\x15\xf4\x30\x6f\x1c\xb0\xd6\x46\xe7\x98\x39\x2d\xa4\x28\x40\x2a\x3a\x88\x9e\x29\xba\x88\x37\x2d\xca\x60\x38\xfa\xba\x5b\x20\xac\xa8\x62\xb0\x4c\xd4\xaf\xda\x45\x0a\xba\x5c\x3b\xb9\xc7\x79\xc5\x14\x2d\x18\x34\x19\x1c\x51\xdb\x25\x4d\xb4\x7e\x06\x14\x38\x6c\x59\x55\xd2\x77\xf8\x69\x59\xfc\x7b\x73\xed\x93\x43\xcb\x32\x6d\x3c\x28\xdc\x1b\x9a\xd3\x62\xab\xc2\x27\xf7\x41\xc9\x08\x2b\x23\x08\xad\x13\x57\x21\x9c\xd3\x72\x0d\x42\x72\xf8\x01\x7c\xa7\xf6\x83\xce\x39\xd7\x82\x3c\x1f\x2f\xd6\x60\x1b\xa2\xdf\x35\x89\x52\x20\xe7\x73\x74\xe0\x66\x26\x64\x4e\xb4\x97\x58\xc2\x0e\x0e\xe1\x60\x92\x34\x6d\xa0\x10\xd6\xb5\x83\x61\x27\xe6\x47\xd3\x89\xbd\x63\xfd\x3b\x8d\x03\x3d\x6c\x42\x2d\x5b\x70\xee\xe8\xdf\x4b\xf4\x66\x4e\xe1\x01\x45\x17\x80\x74\xad\x4f\xc3\xf3\xae\xc6\x1d\xc6\xd7\xc2\xce\xc9\xe1\x29\x30\x86\x2f\x4a\xa6\x4b\x15\x84\x73\xc9\x6f\xfd\x7f\xa5\x6e\x9e\xbd\xf1\xb0\xd4\xdd\x45\x5a\xc2\x3e\x4b\x78\xab\xa8\x84\x74\x4a\x91\x3b\x92\x23\x05\xf2\x1c\x1e\x7b\xf3\x09\xf8\xcf\xab\x24\xb6\x60\xa2\xe8\x4c\x9f\x75\x77\xaa\x8c\xe6\x01\x45\x36\x86\xcf\xc3\x63\x3a\xea\xd4\x8d\x7e\x06\xac\x14\x0a\xe0\x29\xf0\xed\x07\x22\x1a\x65\xda\x44\xae\xa2\x73\x1a\xe6\x90\x69\xa2\x8c\x46\xb2\x2f\xde\x49\x38\x08\xed\xfe\xfd\x41\xaf\x9f\xa9\x55\xd7\xdd\x22\x8d\xfa\x45\x63\xc5\x0f\x80\xf3\xb4\x08\xd6\x79\x30\x9e\x93\xee\x59\xa6\xd0\x4b\xee\x22\xe3\x33\xc1\x3a\x27\x68\x36\x78\x7e\x87\x0a\x06\xd5\x2e\x20\xd3\xaf\x15\xfb\xd8\x3b\x73\x14\xbb\x92\xed\x05\x5d\x2e\x29\x38\x2c\x94\xe4\x42\x45\x5e\xd3\xb5\x7d\xdf\x47\xca\x38\xb4\x5c\xaf\xfb\x7d\xdd\x6d\xf4\xa1\x2d\x77\xdd\x2f\xce\x6d\xc4\x7b\x8b\x4e\x67\xa9\x6f\xfe\x04\x00\x00\xff\xff\xb1\xd1\x27\x78\x05\x11\x00\x00") func jsonschemaDraft04JSONBytes() ([]byte, error) { return bindataRead( @@ -84,12 +84,12 @@ func jsonschemaDraft04JSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4375, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} + info := bindataFileInfo{name: "jsonschema-draft-04.json", size: 4357, mode: os.FileMode(420), modTime: time.Unix(1523760398, 0)} a := &asset{bytes: bytes, info: info} return a, nil } -var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x09\x6e\x88\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x8
3\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x
97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\
x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") +var _v2SchemaJSON = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x4f\x93\xdb\x36\xb2\xbf\xfb\x53\xa0\x14\x57\xd9\xae\xd8\x92\xe3\xf7\x2e\xcf\x97\xd4\xbc\xd8\x49\x66\x37\x5e\x4f\x79\x26\xbb\x87\x78\x5c\x05\x91\x2d\x09\x09\x09\x30\x00\x38\x33\x5a\xef\x7c\xf7\x2d\xf0\x9f\x08\x02\x20\x41\x8a\xd2\xc8\x0e\x0f\xa9\x78\x28\xa0\xd1\xdd\x68\x34\x7e\xdd\xf8\xf7\xf9\x11\x42\x33\x49\x64\x04\xb3\xd7\x68\x76\x86\xfe\x76\xf9\xfe\x1f\xe8\x32\xd8\x40\x8c\xd1\x8a\x71\x74\x79\x8b\xd7\x6b\xe0\xe8\xd5\xfc\x25\x3a\xbb\x38\x9f\xcf\x9e\xab\x0a\x24\x54\xa5\x37\x52\x26\xaf\x17\x0b\x91\x17\x99\x13\xb6\xb8\x79\xb5\x10\x59\xdd\xf9\xef\x82\xd1\x6f\xf2\xc2\x8f\xf3\x4f\xb5\x1a\xea\xc7\x17\x45\x41\xc6\xd7\x8b\x90\xe3\x95\x7c\xf1\xf2\x7f\x8b\xca\x45\x3d\xb9\x4d\x32\xa6\xd8\xf2\x77\x08\x64\xfe\x8d\xc3\x9f\x29\xe1\xa0\x9a\xff\xed\x11\x42\x08\xcd\x8a\xd6\xb3\x9f\x15\x67\x74\xc5\xca\x7f\x27\x58\x6e\xc4\xec\x11\x42\xd7\x59\x5d\x1c\x86\x44\x12\x46\x71\x74\xc1\x59\x02\x5c\x12\x10\xb3\xd7\x68\x85\x23\x01\x59\x81\x04\x4b\x09\x9c\x6a\xbf\x7e\xce\x49\x7d\xba\x7b\x51\xfd\xa1\x44\xe2\xb0\x52\xac\x7d\xb3\x08\x61\x45\x68\x46\x56\x2c\x6e\x80\x86\x8c\xbf\xbd\x93\x40\x05\x61\x74\x96\x95\xbe\x7f\x84\xd0\x7d\x4e\xde\x42\xb7\xe4\xbe\x46\xbb\x14\x5b\x48\x4e\xe8\xba\x90\x05\xa1\x19\xd0\x34\xae\xc4\xce\xbe\xbc\x9a\xbf\x9c\x15\x7f\x5d\x57\xc5\x42\x10\x01\x27\x89\xe2\x48\x51\xb9\xda\x40\xd5\x87\x37\xc0\x15\x5f\x88\xad\x90\xdc\x10\x81\x42\x16\xa4\x31\x50\x39\x2f\x38\xad\xab\xb0\x53\xd8\xac\x94\x56\x6f\xc3\x84\xf4\x11\xa4\x50\xb3\xfa\xe9\xd3\x6f\x9f\x3e\xdf\x2f\xd0\xeb\x8f\x1f\x3f\x7e\xbc\xfe\xf6\xe9\xf7\xaf\x5f\x7f\xfc\x18\x7e\xfb\xec\xfb\xc7\xb3\x36\x79\x54\x43\xe8\x29\xc5\x31\x20\xc6\x11\x49\x9e\xe5\x12\x41\x66\xa0\xe8\xed\x1d\x8e\x93\x08\x5e\xa3\x27\x3b\xc3\x7c\xa2\x73\xba\xc4\x02\x2e\xb0\xdc\xf4\xe5\x76\xd1\xca\x96\xa2\x8a\x94\xcd\x21\xc9\x6c\xec\x2c\x70\x42\x9e\x34\x74\x9d\x19\x7c\xcd\x20\x9c\xea\x2e\x0a\xfe\x42\x84\xd4\x29\x04\x8c\x8a\xb4\x41\xa2\xc1\xdc\x19\x8a\x88\x90\x4a\x49\xef\xce\xdf\xbd\x45\x4a\x52\x81\x70\x10\x40\x22\x21\x44\xcb\x6d\xc5\xec\x4e\x3c\x1c\x45\xef\x57\x9a\xb5\x7d\xae\xfe\xe5\xe4\x31\x86\x90\xe0\xab\x6d\x02\x3b\x2e\xcb\x11\x90\xd9\xa8\xc6\x77\xc2\x59\x98\x06\xfd\xf9\x2e\x78\x45\x01\xa6\xa8\xa0\x71\x5c\xbe\x33\xa7\xd2\xd9\x5f\x95\xef\xd9\xd5\xac\xfd\xdc\x5d\xbf\x5e\xb8\xd1\x3e\xc7\x31\x48\xe0\x5e\x4c\x14\x65\xdf\xb8\xa8\x71\x10\x09\xa3\xc2\xc7\x02\xcb\xa2\x4e\x5a\x02\x82\x94\x13\xb9\xf5\x30\xe6\xb2\xa4\xb5\xfe\x9b\x3e\x7a\xb2\x55\xd2\xa8\x4a\xbc\x16\xb6\x71\x8e\x39\xc7\xdb\x9d\xe1\x10\x09\x71\xbd\x9c\xb3\x41\x89\xd7\xa5\x89\xdc\x57\xb5\x53\x4a\xfe\x4c\xe1\xbc\xa0\x21\x79\x0a\x1a\x0f\x70\xa7\x5c\x08\x8e\xde\xb0\xc0\x43\x24\xad\x74\x63\x0e\xb1\xd9\x90\xe1\xb0\x2d\x13\xa7\x6d\x78\xfd\x04\x14\x38\x8e\x90\xaa\xce\x63\xac\x3e\x23\xbc\x64\xa9\xb4\xf8\x03\x63\xde\xcd\xbe\x16\x13\x4a\x55\xac\x82\x12\xc6\xac\xd4\x35\xf7\x22\xd4\x3a\xff\x22\x73\x0e\x6e\x51\xa0\x75\x1e\xae\x8f\xe8\x5d\xc7\x59\xe6\xe4\x9a\x18\x8d\xd6\x1c\x53\x84\x4d\xb7\x67\x28\x37\x09\x84\x69\x88\x12\x0e\x01\x11\x80\x32\xa2\xf5\xb9\xaa\xc6\xd9\x73\x53\xab\xfb\xb4\x2e\x20\xc6\x54\x92\xa0\x9a\xf3\x69\x1a\x2f\x81\x77\x37\xae\x53\x1a\xce\x40\xc4\xa8\x82\x1c\xb5\xef\xda\x24\x7d\xb9\x61\x69\x14\xa2\x25\xa0\x90\xac\x56\xc0\x81\x4a\xb4\xe2\x2c\xce\x4a\x64\x7a\x9a\x23\xf4\x13\x91\x3f\xa7\x4b\xf4\x63\x84\x6f\x18\x87\x10\xbd\xc3\xfc\x8f\x90\xdd\x52\x44\x04\xc2\x51\xc4\x6e\x21\x74\x48\x21\x81\xc7\xe2\xfd\xea\x12\xf8\x0d\x09\xf6\xe9\x47\x35\xaf\x67\xc4\x14\xf7\x22\x27\x97\xe1\xe2\x76\x2d\x06\x8c\x4a\x1c\x48\x3f\x73\x2d\x0b\x5b\x29\x45\x24\x00\x2a\x0c\x11\xec\x94\xca\xc2\xa6\xc1\x37\x21\x43\x8
3\x3b\x5f\x97\xf1\x43\x5e\x53\x73\x19\xa5\x36\xd8\x2d\x05\x2e\x34\x0b\xeb\x39\xfc\x1d\x63\x51\x01\xbd\x3d\xbb\x90\x84\x40\x25\x59\x6d\x09\x5d\xa3\x1c\x37\xe6\x5c\x16\x9a\x40\x09\x70\xc1\xe8\x82\xf1\x35\xa6\xe4\xdf\x99\x5c\x8e\x9e\x4d\x79\xb4\x27\x2f\xbf\x7e\xf8\x05\x25\x8c\x50\xa9\x98\x29\x90\x62\x60\xea\x75\xae\x13\xca\xbf\x2b\x1a\x29\x27\x76\xd6\x20\xc6\x64\x5f\xe6\x32\x1a\x08\x87\x21\x07\x21\xbc\xb4\xe4\xe0\x32\x67\xa6\xcd\xf3\x1e\xcd\xd9\x6b\xb6\x6f\x8e\x27\xa7\xed\xdb\xe7\xbc\xcc\x1a\x07\xce\x6f\x87\x33\xf0\xba\x51\x17\x22\x66\x78\x79\x8e\xce\xe5\x13\x81\x80\x06\x2c\xe5\x78\x0d\xa1\xb2\xb8\x54\xa8\x79\x09\xbd\xbf\x3c\x47\x01\x8b\x13\x2c\xc9\x32\xaa\xaa\x1d\xd5\xee\xab\x36\xbd\x6c\xfd\x54\x6c\xc8\x08\x01\x3c\xbd\xe7\x07\x88\xb0\x24\x37\x79\x90\x28\x4a\x1d\x10\x1a\x92\x1b\x12\xa6\x38\x42\x40\xc3\x4c\x43\x62\x8e\xae\x36\xb0\x45\x71\x2a\xa4\x9a\x23\x79\x59\xb1\xa8\xf2\xa4\x0c\x60\x9f\xcc\x8d\x40\xf5\x80\xca\xa8\x99\xc3\xa7\x85\x1f\x31\x25\xa9\x82\xc5\x6d\xbd\xd8\x36\x76\x7c\x02\x28\x97\xf6\x1d\x74\x3b\x11\x7e\x91\xae\x32\xf8\x6c\xf4\xe6\x7b\x9a\xa5\x1f\x62\xc6\x21\xcf\x9a\xe5\xed\x8b\x02\xf3\x2c\x33\x33\xdf\x00\xca\xc9\x09\xb4\x04\xf5\xa5\x08\xd7\xc3\x02\x18\x66\xf1\xab\x1e\x83\x37\x4c\xcd\x12\xc1\x1d\x50\xf6\xaa\xbd\xfe\xe2\x73\x48\x38\x08\xa0\x32\x9b\x18\x44\x86\x0b\x6a\xc1\xaa\x26\x96\x2d\x96\x3c\xa0\x54\x65\x73\x87\x15\xca\x15\xe5\xf5\x94\x46\x9f\x33\x1a\x0c\x9a\xb1\x5a\xd9\x6a\x95\xcd\xcb\x7e\xec\x9a\xc5\x94\x3b\x37\x26\x31\xd7\xfc\xe4\x1f\x13\x8c\x31\x75\x9c\xba\xf7\x87\x3c\xa1\xb7\x4f\x17\x1b\x09\x82\x98\xc4\x70\x95\xd3\xe8\x4c\x48\x5a\xa6\xd6\x2a\x3d\x56\x42\x80\x9f\xaf\xae\x2e\x50\x0c\x42\xe0\x35\x34\x3c\x8a\x62\x03\x37\xba\xb2\x27\x04\xda\x25\x8d\x06\xe2\xa0\x13\x8a\xf3\xf5\xec\x10\x72\x67\x88\x90\x3d\x4b\x64\xeb\xaa\xda\x8f\xf7\x5a\x75\x47\x9a\xa8\x51\x70\x26\xd2\x38\xc6\x7c\xbb\x57\xfc\xbd\xe4\x04\x56\xa8\xa0\x54\x9a\x45\xd5\xf7\x0f\x16\xfc\x57\x1c\x3c\xdf\x23\xba\x77\x38\xda\x16\x4b\x31\x53\x6a\x4d\x9a\x15\x63\xe7\xe1\x18\x69\x9f\x22\xe0\x24\xbb\x94\x4b\x97\xee\x2d\xf9\x70\x87\x72\x7b\xe6\xc4\x33\x2a\x66\x5e\x1c\x35\x72\xe3\x2d\xda\x73\xe4\xc7\x51\x6d\xa4\xa1\x2a\x4f\xde\x94\xcb\xb2\x3e\x31\x48\xae\x82\xce\xc9\xc8\x65\xcd\xc3\xb7\x34\xb6\x2b\xdf\x58\x65\x78\x6e\x73\xac\x5e\x24\x0d\x3f\xdc\x70\x23\xc6\xda\x52\x0b\x2d\x63\x7d\xa9\x49\x2d\x54\x48\x28\xc0\x12\x9c\xe3\x63\xc9\x58\x04\x98\x36\x07\xc8\x0a\xa7\x91\xd4\xf0\xbc\xc1\xa8\xb9\x70\xd0\xc6\xa9\xb6\x78\x80\x5a\xa3\xb4\x2c\xf4\x18\x0b\x8a\x9d\xd0\xb4\x55\x10\xee\x0d\xc5\xd6\xe0\x99\x93\xdc\xa1\x04\xbb\xf1\xa7\x23\xd1\xd1\x97\x8c\x87\x13\x0a\x21\x02\xe9\x99\x25\xed\x20\xc5\x92\x66\x3c\x32\x9c\xd6\x06\xb0\x31\x5c\x86\x29\x0a\xcb\x60\x33\x12\xa5\x91\xfc\x96\x75\xd0\x59\xd7\x13\xbd\xd3\x23\x79\xdd\x2a\x90\xa6\x38\x06\x91\x39\x7f\x20\x72\x03\x1c\x2d\x01\x61\xba\x45\x37\x38\x22\x61\x8e\x71\x85\xc4\x32\x15\x28\x60\x61\x16\xb8\x3d\x29\xdc\x4d\x3d\x2f\x12\x13\x7d\xc8\x7e\x37\xee\xa8\x7f\xfa\xdb\xcb\x17\xff\x77\xfd\xf9\x7f\xee\x9f\x3d\xfe\xcf\xa7\xa7\x45\xfb\xcf\x1e\xf7\xf3\xe0\xff\xc4\x51\x0a\x8e\x4c\xcb\x01\xdc\x0a\x65\xb2\x01\x83\xed\x3d\xe4\xa9\xa3\x4e\x2d\x59\xc5\xe8\x2f\x48\x7d\x5a\x6e\x37\xbf\x5c\x9f\x35\x13\x64\x14\xfa\xef\x0b\x68\xa6\x0d\xb4\x8e\xf1\xa8\xff\xbb\x60\xf4\x03\x64\xab\x5b\x81\x65\x51\xe6\xda\xca\xfa\xf0\xb0\xac\x3e\x9c\xca\x26\x0e\x1d\xdb\x57\x5b\xbb\xb4\x9a\xa6\xb6\x9b\x1a\x6b\xd1\x9a\x9e\x7e\x33\x9a\xec\x41\x69\x45\x22\xb8\xb4\x51\xeb\x04\x77\xca\x6f\x7b\x7b\xc8\xb2\xb0\x95\x92\x25\x5b\xd0\x42\xaa\x2a\xdd\x32\x78\x4f\x0c\xab\x68\x46\x6c\xea\x6d\xf4\x5c\x5e\xde\xc4\xac\xa5\xf9\xd1\x00\x9f\x7d\x98\x65\x24\xbd\xc7\x
97\xd4\xb3\x3a\xa8\x2b\xa0\x34\x76\xf9\x65\x5f\x2d\x25\x95\x1b\xcf\xd6\xf4\x9b\x5f\x09\x95\xb0\x36\x3f\xdb\xd0\x39\x2a\x93\x1c\x9d\x03\xa2\x4a\xca\xf5\xf6\x10\xb6\x94\x89\x0b\x6a\x70\x12\x13\x49\x6e\x40\xe4\x29\x12\x2b\xbd\x80\x45\x11\x04\xaa\xc2\x8f\x56\x9e\x5c\x6b\xec\x8d\x5a\x0e\x14\x59\x06\x2b\x1e\x24\xcb\xc2\x56\x4a\x31\xbe\x23\x71\x1a\xfb\x51\x2a\x0b\x3b\x1c\x48\x10\xa5\x82\xdc\xc0\xbb\x3e\x24\x8d\x5a\x76\x2e\x09\xed\xc1\x65\x51\xb8\x83\xcb\x3e\x24\x8d\x5a\x2e\x5d\xfe\x02\x74\x2d\x3d\xf1\xef\xae\xb8\x4b\xe6\x5e\xd4\xaa\xe2\x2e\x5c\x5e\xec\x0e\xf5\x5b\x0c\xcb\x0a\xbb\xa4\x3c\xf7\x1f\x2a\x55\x69\x97\x8c\x7d\x68\x95\xa5\xad\xb4\xf4\x9c\xa5\x07\xb9\x7a\x05\xbb\xad\x50\x6f\xfb\xa0\x4e\x9b\x48\x23\x49\x92\x28\x87\x19\x3e\x32\xee\xca\x3b\x46\x7e\x7f\x18\x64\xcc\xcc\x0f\x34\xe9\x36\x8b\xb7\x6c\xa8\xa5\x5b\x54\x4c\x54\x5b\x15\x3a\xf1\x6c\x2d\xfe\x96\xc8\x0d\xba\x7b\x81\x88\xc8\x23\xab\xee\x7d\x3b\x92\xa7\x60\x29\xe3\xdc\xff\xb8\x64\xe1\xf6\xa2\x5a\x59\xdc\x6f\xeb\x45\x7d\x6a\xd1\x76\x1e\xea\xb8\xf1\xfa\x14\xd3\x36\x63\xe5\xd7\xf3\xe4\xbe\x25\xbd\x5e\x05\xeb\x73\x74\xb5\x21\x2a\x2e\x4e\xa3\x30\xdf\xbf\x43\x28\x2a\xd1\xa5\x2a\x9d\x8a\xfd\x76\xd8\x8d\xbc\x67\x65\xc7\xb8\x03\x45\xec\xa3\xb0\x37\x8a\x70\x4c\x68\x91\x51\x8e\x58\x80\xed\x4a\xf3\x81\x62\xca\x96\xbb\xf1\x52\xcd\x80\xfb\xe4\x4a\x5d\x6c\xdf\x6e\x20\x4b\x80\x30\x8e\x28\x93\xf9\xe9\x8d\x8a\x6d\xd5\x59\x65\x7b\xaa\x44\x9e\xc0\xc2\xd1\x7c\x40\x26\xd6\x1a\xce\xf9\xc5\x69\x7b\x6c\xec\xc8\x71\x7b\xe5\x21\x2e\xd3\xe5\x65\x93\x91\x53\x0b\x7b\x3a\xc7\xfa\x17\x6a\x01\xa7\x33\xd0\xf4\x40\x0f\x39\x87\xda\xe4\x54\x87\x3a\xd5\xe3\xc7\xa6\x8e\x20\xd4\x11\xb2\x4e\xb1\xe9\x14\x9b\x4e\xb1\xe9\x14\x9b\xfe\x15\x63\xd3\x47\xf5\xff\x97\x38\xe9\xcf\x14\xf8\x76\x82\x49\x13\x4c\xaa\x7d\xcd\x6c\x62\x42\x49\x87\x43\x49\x19\x33\x6f\xe3\x44\x6e\x9b\xab\x8a\x3e\x86\xaa\x99\x52\x1b\x5b\x59\x33\x02\x09\xa0\x21\xa1\x6b\x84\x6b\x66\xbb\xdc\x16\x0c\xd3\x68\xab\xec\x36\x4b\xd8\x60\x8a\x40\x31\x85\x6e\x14\x57\x13\xc2\xfb\x92\x10\xde\xbf\x88\xdc\xbc\x53\x5e\x7f\x82\x7a\x13\xd4\x9b\xa0\xde\x04\xf5\x90\x01\xf5\x94\xcb\x7b\x83\x25\x9e\xd0\xde\x84\xf6\x6a\x5f\x4b\xb3\x98\x00\xdf\x04\xf8\x6c\xbc\x7f\x19\x80\xaf\xf1\x71\x45\x22\x98\x40\xe0\x04\x02\x27\x10\xd8\x29\xf5\x04\x02\xff\x4a\x20\x30\xc1\x72\xf3\x65\x02\x40\xd7\xc1\xd1\xe2\x6b\xf1\xa9\x7b\xfb\xe4\x20\xc0\x68\x9d\xd4\xb4\xd3\x96\xb5\xa6\xd1\x41\x20\xe6\x89\xc3\x48\x65\x58\x13\x84\x9c\x56\x56\x3b\x0c\xe0\x6b\x83\x5c\x13\xd2\x9a\x90\xd6\x84\xb4\x26\xa4\x85\x0c\xa4\x45\x19\xfd\xff\x63\x6c\x52\xb5\x1f\x1e\x19\x74\x3a\xcd\xb9\x69\xce\xa6\x3a\x0f\x7a\x2d\x19\xc7\x81\x14\x5d\xcb\xd5\x03\xc9\x39\xd0\xb0\xd1\xb3\xcd\xfb\x7a\x2d\x5d\x3a\x48\xe1\xfa\x2e\xe6\x81\x42\x18\x86\xd6\xc1\xbe\xb1\x23\xd3\xf7\x34\xed\x19\x0a\x0b\xc4\x48\x44\xfd\x22\x50\xb6\x42\x58\xbb\xe5\x3d\xa7\x73\xd4\x8b\xc4\x8c\x70\x61\xec\x73\xee\xc3\x81\x8b\xf5\xe2\xd7\x52\x3e\xcf\xeb\xeb\x17\x3b\x71\x16\xda\x7d\xb8\xde\xf0\x7a\x8f\x06\x2d\xa7\x40\x7b\xc1\x9d\x41\x4d\xb6\x61\xa2\x4e\x9f\x3d\xa0\xc5\xae\xe3\x1c\x1d\x40\x6c\x48\x8b\x63\xa0\xb5\x01\xed\x8e\x02\xe9\x86\xc8\x3b\x06\xee\xdb\x4b\xde\xbd\xc0\xa1\x6f\xcb\xda\xfc\xc2\x44\x16\x87\x9c\x17\x31\xd3\x30\x20\x39\x42\xcb\x6f\xf2\xf1\xf4\x72\x10\xf8\x1c\xa0\xf3\xbd\x10\xea\x21\x35\x7d\xe8\x86\xdb\x15\xed\x81\x81\x07\x28\xbb\x13\x28\xc7\xf8\xce\x7d\x8d\xc2\x31\xb4\x7e\x94\xd6\xdb\x55\xef\x4a\xfb\xed\xc3\x40\x3e\xeb\x9f\xe9\x99\x0f\xdf\x08\x65\x88\x27\x73\x86\x31\x9d\x47\xdf\x55\x19\xba\x3d\xee\x15\x0a\xcd\x8c\xaa\x5e\xb9\xf6\x57\x33\x73\x5a\xa1\x89\x7b\x3b\xa0\xb2\xa4\xc2\xf6\xc1\x53\xb5\x00\xca\x23\xe5\xf4\x60\x6a\xb4\x2d\
x74\xea\x4e\xed\x3b\xe3\x47\xfb\xed\x82\x3d\x19\xd4\x3b\x6b\xaf\xae\x2b\x2f\x57\xb3\x82\x68\xcb\xed\x88\x2e\xe1\x5c\xd7\x26\xfa\x0a\x65\xe7\xce\x11\x33\xb4\xdd\x66\xe3\x37\xf6\xfa\x70\xd6\x4f\xa1\x21\x51\xd8\x3c\x26\x14\x4b\xc6\x87\x44\x27\x1c\x70\xf8\x9e\x46\xce\xab\x21\x07\x5f\xc1\x76\x17\x1b\x77\xb4\xda\x75\xa0\x0a\x3a\x30\xe1\xf8\x97\x32\x16\x2b\x00\x75\x85\xee\x62\x46\xef\xd3\x85\xb5\x6b\x60\xbe\xf2\x30\x7a\x8c\x0b\x4b\xa6\xd0\xf9\x64\x42\xe7\x07\x41\x41\xe3\x2c\x5d\xf9\x6d\xe9\x39\x98\x3b\x3b\x5d\x67\xd4\x5c\xed\xf2\xf0\x48\x7b\xbd\x2d\x31\xdd\x3f\x34\xad\x44\x76\x51\x9a\x56\x22\xa7\x95\xc8\x69\x25\xf2\xe1\x56\x22\x1f\x00\x32\x6a\x73\x92\xed\xe1\xc6\x7d\x9f\x49\x2c\x69\x7e\xc8\x31\x4c\x0c\xb4\xf2\x54\x3b\x79\x3b\x9e\x4d\xb4\xd1\x18\x3e\x5f\x9a\x93\xa2\x11\xc3\xda\x27\x0b\xaf\x37\x2e\x5c\x37\xfb\xeb\x9a\xd6\xc3\xac\xc3\xcc\xf8\x1e\x5b\x9d\xac\x22\x64\xb7\xed\x26\xb8\xf3\xb9\x3c\xbb\x1f\xe2\xb0\x22\x77\x43\x6a\x62\x29\x39\x59\xa6\xe6\xe5\xcd\x7b\x83\xc0\x5b\x8e\x93\x64\xac\xeb\xca\x4f\x65\xac\x4a\xbc\x1e\xcd\x82\xfa\x3c\x70\x36\xb6\xb5\xed\x79\xef\xec\x68\x00\xff\x54\xfa\xb5\xe3\xf1\xdb\xe1\xbe\xce\x76\x17\xaf\x57\xb6\x6b\x89\x05\x09\xce\x52\xb9\x01\x2a\x49\xbe\xd9\xf4\xd2\xb8\x7a\xbf\x91\x02\xf3\x22\x8c\x13\xf2\x77\xd8\x8e\x43\x8b\xe1\x54\x6e\x5e\x9d\xc7\x49\x44\x02\x22\xc7\xa4\x79\x81\x85\xb8\x65\x3c\x1c\x93\xe6\x59\xa2\xf8\x1c\x51\x95\x05\xd9\x20\x00\x21\x7e\x60\x21\x58\xa9\x56\xff\xbe\xb6\x5a\x5e\x5b\x3f\x1f\xd6\xd3\x3c\xc4\x4d\xba\x99\xb4\x63\x6e\x7d\x3e\x3d\x57\xd2\x18\x5f\x47\xe8\xc3\x06\x8a\x68\x6c\x7f\x3b\x72\x0f\xe7\xe2\x77\x77\xf1\xd0\x99\xab\xdf\x2e\xfe\xd6\xbb\xcd\x1a\xb9\x90\xd1\xaf\xf2\x38\x3d\xdb\x74\xf8\xeb\xe3\xda\xe8\x2a\x62\xb7\xda\x1b\x07\xa9\xdc\x30\x5e\xbc\x68\xfb\x6b\x9f\x97\xf1\xc6\xb1\xd8\x5c\x29\x1e\x49\x30\xc5\xf7\xde\xad\x91\x42\xf9\xdd\xed\x89\x80\x25\xbe\x37\xd7\xe7\x32\x5c\xe6\x35\xac\xd4\x0c\x2d\xf7\x90\xc4\xe3\xf5\xe3\x2f\x7f\x54\x18\x88\xe3\x61\x47\x85\x64\x7f\xc0\xd7\x3f\x1a\x92\x42\xe9\xc7\x1e\x0d\x95\x76\xa7\x51\xa0\x8f\x02\x1b\x46\x9e\x06\x42\xd1\xf2\x01\x07\x02\xde\xe9\x7d\x1a\x0b\xa7\x32\x16\xcc\xc0\xee\xc4\x90\xd2\x5f\x6f\x98\x54\x5d\xf2\x95\xe1\xa7\x69\x10\x3a\x06\xe1\x65\xb3\x17\x47\x58\x78\xd0\x45\xd6\x5b\xd5\x5f\x25\x1d\x71\x49\xa6\x7a\x64\xda\xd0\x6f\xc7\x3a\x4c\xe3\x09\xc0\x6e\x96\x2c\xa7\xa7\x77\x34\x10\x05\x08\x21\x44\x92\x65\x77\xdf\x20\x5c\xbc\xe7\x97\x3f\xf4\x1a\x45\xd6\xe7\x27\x4a\xde\x74\x27\x66\x11\x7d\x70\xba\xd3\x78\xf9\x1e\x0d\xca\xc8\x39\xde\x7c\xb3\xa6\xe1\xbc\xd7\xc1\x6a\x6f\xb3\x0e\x52\xbe\xe4\x98\x8a\x15\x70\x94\x70\x26\x59\xc0\xa2\xf2\x1c\xfb\xd9\xc5\xf9\xbc\xd5\x92\x9c\xa3\xdf\xe6\x1e\xb3\x0d\x49\xba\x87\x50\x5f\x84\xfe\xe9\xd6\xf8\xbb\xe6\xf0\x7a\xeb\xa6\x65\x3b\x86\x8b\x79\x93\xf5\x59\x20\x6e\xb4\xa7\x44\xf4\x3f\xa5\xfe\x67\x42\x12\xdb\xd3\xe7\xbb\xa5\xa3\x8c\x5c\x2b\x97\xbb\xbb\x7f\x8e\xc5\x6e\xed\x43\x5c\xbf\x74\xc8\x8f\xff\xe6\xd6\xbe\x91\xb6\xf5\x95\xe4\xed\x93\xc4\xa8\x5b\xf9\x76\x4d\x35\xb7\xd8\x8c\xb6\x7d\xaf\x72\xe0\xb6\xbd\x01\x63\x9e\x76\xab\x1a\x32\x76\xe4\x8c\x76\xc2\xad\x6c\xa2\x65\xf7\xcf\xf8\xa7\xda\x2a\xb9\x8c\x3d\x3c\xa3\x9d\x64\x33\xe5\x1a\xb5\x2d\xfb\x86\xa2\x5a\x7f\x19\x5b\x7f\xc6\x3f\xd1\x53\xd3\xe2\x41\x5b\xd3\x4f\xf0\xec\xb0\x42\x73\x43\xd2\x68\x27\xd3\x6a\x6a\x34\xf6\x4e\x1e\x52\x8b\x87\x6c\xcc\xae\x44\xfb\x9e\xa7\x51\x4f\x9d\x55\x03\x81\x8e\x67\xfc\xb4\x69\xf0\x3a\x18\xf2\x40\xd0\xf6\xa8\x34\xe3\xc9\x98\xaf\xf6\xda\x24\xd3\xeb\x60\xb9\x0e\xd3\x1f\xa9\xff\xee\x1f\xfd\x37\x00\x00\xff\xff\x69\x5d\x0a\x6a\x39\x9d\x00\x00") func v2SchemaJSONBytes() ([]byte, error) { return bindataRead( @@ -104,7 
+104,7 @@ func v2SchemaJSON() (*asset, error) { return nil, err } - info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1482389892, 0)} + info := bindataFileInfo{name: "v2/schema.json", size: 40249, mode: os.FileMode(420), modTime: time.Unix(1523760397, 0)} a := &asset{bytes: bytes, info: info} return a, nil } diff --git a/vendor/github.com/go-openapi/spec/expander.go b/vendor/github.com/go-openapi/spec/expander.go index cebb5cc5935..ad1529db5f2 100644 --- a/vendor/github.com/go-openapi/spec/expander.go +++ b/vendor/github.com/go-openapi/spec/expander.go @@ -491,7 +491,7 @@ func ExpandSpec(spec *Swagger, options *ExpandOptions) error { for key, definition := range spec.Definitions { var def *Schema var err error - if def, err = expandSchema(definition, []string{fmt.Sprintf("#/defintions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) { + if def, err = expandSchema(definition, []string{fmt.Sprintf("#/definitions/%s", key)}, resolver, specBasePath); shouldStopOnError(err, resolver.options) { return err } if def != nil { @@ -679,6 +679,12 @@ func expandSchema(target Schema, parentRefs []string, resolver *schemaLoader, ba if t != nil { parentRefs = append(parentRefs, normalizedRef.String()) + var err error + resolver, err = transitiveResolver(basePath, target.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return nil, err + } + return expandSchema(*t, parentRefs, resolver, normalizedBasePath) } } @@ -814,6 +820,13 @@ func expandPathItem(pathItem *PathItem, resolver *schemaLoader, basePath string) if err := derefPathItem(pathItem, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err } + if pathItem.Ref.String() != "" { + var err error + resolver, err = transitiveResolver(basePath, pathItem.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return err + } + } pathItem.Ref = Ref{} parentRefs = parentRefs[0:] @@ -874,6 +887,28 @@ func expandOperation(op *Operation, resolver *schemaLoader, basePath string) err return nil } +func transitiveResolver(basePath string, ref Ref, resolver *schemaLoader) (*schemaLoader, error) { + if ref.IsRoot() || ref.HasFragmentOnly { + return resolver, nil + } + + baseRef, _ := NewRef(basePath) + currentRef := normalizeFileRef(&ref, basePath) + // Set a new root to resolve against + if !strings.HasPrefix(currentRef.String(), baseRef.String()) { + rootURL := currentRef.GetURL() + rootURL.Fragment = "" + root, _ := resolver.cache.Get(rootURL.String()) + var err error + resolver, err = defaultSchemaLoader(root, resolver.options, resolver.cache) + if err != nil { + return nil, err + } + } + + return resolver, nil +} + // ExpandResponse expands a response based on a basepath // This is the exported version of expandResponse // all refs inside response will be resolved relative to basePath @@ -922,6 +957,13 @@ func expandResponse(response *Response, resolver *schemaLoader, basePath string) if err := derefResponse(response, parentRefs, resolver, basePath); shouldStopOnError(err, resolver.options) { return err } + if response.Ref.String() != "" { + var err error + resolver, err = transitiveResolver(basePath, response.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return err + } + } response.Ref = Ref{} parentRefs = parentRefs[0:] @@ -984,6 +1026,13 @@ func expandParameter(parameter *Parameter, resolver *schemaLoader, basePath stri if err := derefParameter(parameter, parentRefs, resolver, basePath); 
shouldStopOnError(err, resolver.options) { return err } + if parameter.Ref.String() != "" { + var err error + resolver, err = transitiveResolver(basePath, parameter.Ref, resolver) + if shouldStopOnError(err, resolver.options) { + return err + } + } parameter.Ref = Ref{} parentRefs = parentRefs[0:] diff --git a/vendor/github.com/go-openapi/swag/json.go b/vendor/github.com/go-openapi/swag/json.go index 17ab0f61691..274331ef1d5 100644 --- a/vendor/github.com/go-openapi/swag/json.go +++ b/vendor/github.com/go-openapi/swag/json.go @@ -26,6 +26,9 @@ import ( "github.com/mailru/easyjson/jwriter" ) +// nullJSON represents a JSON object with null type +var nullJSON = []byte("null") + // DefaultJSONNameProvider the default cache for types var DefaultJSONNameProvider = NewNameProvider() @@ -90,16 +93,29 @@ func ConcatJSON(blobs ...[]byte) []byte { if len(blobs) == 0 { return nil } - if len(blobs) == 1 { + + last := len(blobs) - 1 + for blobs[last] == nil || bytes.Equal(blobs[last], nullJSON) { + // strips trailing null objects + last = last - 1 + if last < 0 { + // there was nothing but "null"s or nil... + return nil + } + } + if last == 0 { return blobs[0] } - last := len(blobs) - 1 var opening, closing byte var idx, a int buf := bytes.NewBuffer(nil) - for i, b := range blobs { + for i, b := range blobs[:last+1] { + if b == nil || bytes.Equal(b, nullJSON) { + // a null object is in the list: skip it + continue + } if len(b) > 0 && opening == 0 { // is this an array or an object? opening, closing = b[0], closers[b[0]] } diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile deleted file mode 100644 index e2e0651a934..00000000000 --- a/vendor/github.com/golang/protobuf/proto/Makefile +++ /dev/null @@ -1,43 +0,0 @@ -# Go support for Protocol Buffers - Google's data interchange format -# -# Copyright 2010 The Go Authors. All rights reserved. -# https://github.com/golang/protobuf -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above -# copyright notice, this list of conditions and the following disclaimer -# in the documentation and/or other materials provided with the -# distribution. -# * Neither the name of Google Inc. nor the names of its -# contributors may be used to endorse or promote products derived from -# this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -install: - go install - -test: install generate-test-pbs - go test - - -generate-test-pbs: - make install - make -C testdata - protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. proto3_proto/proto3.proto - make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go index e392575b353..3cd3249f706 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -35,22 +35,39 @@ package proto import ( + "fmt" "log" "reflect" "strings" ) // Clone returns a deep copy of a protocol buffer. -func Clone(pb Message) Message { - in := reflect.ValueOf(pb) +func Clone(src Message) Message { + in := reflect.ValueOf(src) if in.IsNil() { - return pb + return src } - out := reflect.New(in.Type().Elem()) - // out is empty so a merge is a deep copy. - mergeStruct(out.Elem(), in.Elem()) - return out.Interface().(Message) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) } // Merge merges src into dst. @@ -58,17 +75,24 @@ func Clone(pb Message) Message { // Elements of repeated fields will be appended. // Merge panics if src and dst are not the same type, or if dst is nil. func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + in := reflect.ValueOf(src) out := reflect.ValueOf(dst) if out.IsNil() { panic("proto: nil destination") } if in.Type() != out.Type() { - // Explicit test prior to mergeStruct so that mistyped nils will fail - panic("proto: type mismatch") + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) } if in.IsNil() { - // Merging nil into non-nil is a quiet no-op + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) return } mergeStruct(out.Elem(), in.Elem()) @@ -84,7 +108,7 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, ok := extendable(in.Addr().Interface()); ok { + if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go index aa207298f99..d9aa3c42d66 100644 --- a/vendor/github.com/golang/protobuf/proto/decode.go +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -39,8 +39,6 @@ import ( "errors" "fmt" "io" - "os" - "reflect" ) // errOverflow is returned when an integer is too large to be represented. @@ -50,10 +48,6 @@ var errOverflow = errors.New("proto: integer overflow") // wire type is encountered. It does not get returned to user code. 
var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") -// The fundamental decoders that interpret bytes on the wire. -// Those that take integer types all return uint64 and are -// therefore of type valueDecoder. - // DecodeVarint reads a varint-encoded integer from the slice. // It returns the integer and the number of bytes consumed, or // zero if there is not enough. @@ -267,9 +261,6 @@ func (p *Buffer) DecodeZigzag32() (x uint64, err error) { return } -// These are not ValueDecoders: they produce an array of bytes or a string. -// bytes, embedded messages - // DecodeRawBytes reads a count-delimited byte buffer from the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -311,81 +302,29 @@ func (p *Buffer) DecodeStringBytes() (s string, err error) { return string(buf), nil } -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -// If the protocol buffer has extensions, and the field matches, add it as an extension. -// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. -func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { - oi := o.index - - err := o.skip(t, tag, wire) - if err != nil { - return err - } - - if !unrecField.IsValid() { - return nil - } - - ptr := structPointer_Bytes(base, unrecField) - - // Add the skipped field to struct field - obuf := o.buf - - o.buf = *ptr - o.EncodeVarint(uint64(tag<<3 | wire)) - *ptr = append(o.buf, obuf[oi:o.index]...) - - o.buf = obuf - - return nil -} - -// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. -func (o *Buffer) skip(t reflect.Type, tag, wire int) error { - - var u uint64 - var err error - - switch wire { - case WireVarint: - _, err = o.DecodeVarint() - case WireFixed64: - _, err = o.DecodeFixed64() - case WireBytes: - _, err = o.DecodeRawBytes(false) - case WireFixed32: - _, err = o.DecodeFixed32() - case WireStartGroup: - for { - u, err = o.DecodeVarint() - if err != nil { - break - } - fwire := int(u & 0x7) - if fwire == WireEndGroup { - break - } - ftag := int(u >> 3) - err = o.skip(t, ftag, fwire) - if err != nil { - break - } - } - default: - err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) - } - return err -} - // Unmarshaler is the interface representing objects that can -// unmarshal themselves. The method should reset the receiver before -// decoding starts. The argument points to data that may be +// unmarshal themselves. The argument points to data that may be // overwritten, so implementations should not keep references to the // buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. type Unmarshaler interface { Unmarshal([]byte) error } +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + // Unmarshal parses the protocol buffer representation in buf and places the // decoded result in pb. 
If the struct underlying pb does not match // the data in buf, the results can be unpredictable. @@ -395,7 +334,13 @@ type Unmarshaler interface { // to preserve and append to existing data. func Unmarshal(buf []byte, pb Message) error { pb.Reset() - return UnmarshalMerge(buf, pb) + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) } // UnmarshalMerge parses the protocol buffer representation in buf and @@ -405,8 +350,16 @@ func Unmarshal(buf []byte, pb Message) error { // UnmarshalMerge merges into existing data in pb. // Most code should use Unmarshal instead. func UnmarshalMerge(buf []byte, pb Message) error { - // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 return u.Unmarshal(buf) } return NewBuffer(buf).Unmarshal(pb) @@ -422,12 +375,17 @@ func (p *Buffer) DecodeMessage(pb Message) error { } // DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. func (p *Buffer) DecodeGroup(pb Message) error { - typ, base, err := getbase(pb) - if err != nil { - return err + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF } - return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) + err := Unmarshal(b[:x], pb) + p.index += y + return err } // Unmarshal parses the protocol buffer representation in the @@ -438,533 +396,33 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. - if u, ok := pb.(Unmarshaler); ok { - err := u.Unmarshal(p.buf[p.index:]) + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) p.index = len(p.buf) return err } - - typ, base, err := getbase(pb) - if err != nil { - return err - } - - err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) - - if collectStats { - stats.Decode++ - } - - return err -} - -// unmarshalType does the work of unmarshaling a structure. -func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { - var state errorState - required, reqFields := prop.reqCount, uint64(0) - - var err error - for err == nil && o.index < len(o.buf) { - oi := o.index - var u uint64 - u, err = o.DecodeVarint() - if err != nil { - break - } - wire := int(u & 0x7) - if wire == WireEndGroup { - if is_group { - if required > 0 { - // Not enough information to determine the exact field. - // (See below.) - return &RequiredNotSetError{"{Unknown}"} - } - return nil // input is satisfied - } - return fmt.Errorf("proto: %s: wiretype end group for non-group", st) - } - tag := int(u >> 3) - if tag <= 0 { - return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) - } - fieldnum, ok := prop.decoderTags.get(tag) - if !ok { - // Maybe it's an extension? 
- if prop.extendable { - if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { - if err = o.skip(st, tag, wire); err == nil { - extmap := e.extensionsWrite() - ext := extmap[int32(tag)] // may be missing - ext.enc = append(ext.enc, o.buf[oi:o.index]...) - extmap[int32(tag)] = ext - } - continue - } - } - // Maybe it's a oneof? - if prop.oneofUnmarshaler != nil { - m := structPointer_Interface(base, st).(Message) - // First return value indicates whether tag is a oneof field. - ok, err = prop.oneofUnmarshaler(m, tag, wire, o) - if err == ErrInternalBadWireType { - // Map the error to something more descriptive. - // Do the formatting here to save generated code space. - err = fmt.Errorf("bad wiretype for oneof field in %T", m) - } - if ok { - continue - } - } - err = o.skipAndSave(st, tag, wire, base, prop.unrecField) - continue - } - p := prop.Prop[fieldnum] - - if p.dec == nil { - fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) - continue - } - dec := p.dec - if wire != WireStartGroup && wire != p.WireType { - if wire == WireBytes && p.packedDec != nil { - // a packable field - dec = p.packedDec - } else { - err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) - continue - } - } - decErr := dec(o, p, base) - if decErr != nil && !state.shouldContinue(decErr, p) { - err = decErr - } - if err == nil && p.Required { - // Successfully decoded a required field. - if tag <= 64 { - // use bitmap for fields 1-64 to catch field reuse. - var mask uint64 = 1 << uint64(tag-1) - if reqFields&mask == 0 { - // new required field - reqFields |= mask - required-- - } - } else { - // This is imprecise. It can be fooled by a required field - // with a tag > 64 that is encoded twice; that's very rare. - // A fully correct implementation would require allocating - // a data structure, which we would like to avoid. - required-- - } - } - } - if err == nil { - if is_group { - return io.ErrUnexpectedEOF - } - if state.err != nil { - return state.err - } - if required > 0 { - // Not enough information to determine the exact field. If we use extra - // CPU, we could determine the field only if the missing required field - // has a tag <= 64 and we check reqFields. - return &RequiredNotSetError{"{Unknown}"} - } - } - return err -} - -// Individual type decoders -// For each, -// u is the decoded value, -// v is a pointer to the field (pointer) in the struct - -// Sizes of the pools to allocate inside the Buffer. -// The goal is modest amortization and allocation -// on at least 16-byte boundaries. -const ( - boolPoolSize = 16 - uint32PoolSize = 8 - uint64PoolSize = 4 -) - -// Decode a bool. -func (o *Buffer) dec_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - if len(o.bools) == 0 { - o.bools = make([]bool, boolPoolSize) - } - o.bools[0] = u != 0 - *structPointer_Bool(base, p.field) = &o.bools[0] - o.bools = o.bools[1:] - return nil -} - -func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - *structPointer_BoolVal(base, p.field) = u != 0 - return nil -} - -// Decode an int32. 
-func (o *Buffer) dec_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) - return nil -} - -func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) - return nil -} - -// Decode an int64. -func (o *Buffer) dec_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64_Set(structPointer_Word64(base, p.field), o, u) - return nil -} - -func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - word64Val_Set(structPointer_Word64Val(base, p.field), o, u) - return nil -} - -// Decode a string. -func (o *Buffer) dec_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_String(base, p.field) = &s - return nil -} - -func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - *structPointer_StringVal(base, p.field) = s - return nil -} - -// Decode a slice of bytes ([]byte). -func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - *structPointer_Bytes(base, p.field) = b - return nil -} - -// Decode a slice of bools ([]bool). -func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - v := structPointer_BoolSlice(base, p.field) - *v = append(*v, u != 0) - return nil -} - -// Decode a slice of bools ([]bool) in packed format. -func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { - v := structPointer_BoolSlice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded bools - fin := o.index + nb - if fin < o.index { - return errOverflow - } - - y := *v - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - y = append(y, u != 0) - } - - *v = y - return nil -} - -// Decode a slice of int32s ([]int32). -func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - structPointer_Word32Slice(base, p.field).Append(uint32(u)) - return nil -} - -// Decode a slice of int32s ([]int32) in packed format. -func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int32s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(uint32(u)) - } - return nil -} - -// Decode a slice of int64s ([]int64). -func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { - u, err := p.valDec(o) - if err != nil { - return err - } - - structPointer_Word64Slice(base, p.field).Append(u) - return nil -} - -// Decode a slice of int64s ([]int64) in packed format. 
-func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Slice(base, p.field) - - nn, err := o.DecodeVarint() - if err != nil { - return err - } - nb := int(nn) // number of bytes of encoded int64s - - fin := o.index + nb - if fin < o.index { - return errOverflow - } - for o.index < fin { - u, err := p.valDec(o) - if err != nil { - return err - } - v.Append(u) - } - return nil -} - -// Decode a slice of strings ([]string). -func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { - s, err := o.DecodeStringBytes() - if err != nil { - return err - } - v := structPointer_StringSlice(base, p.field) - *v = append(*v, s) - return nil -} - -// Decode a slice of slice of bytes ([][]byte). -func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { - b, err := o.DecodeRawBytes(true) - if err != nil { - return err - } - v := structPointer_BytesSlice(base, p.field) - *v = append(*v, b) - return nil -} - -// Decode a map field. -func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { - raw, err := o.DecodeRawBytes(false) - if err != nil { - return err - } - oi := o.index // index at the end of this map entry - o.index -= len(raw) // move buffer back to start of map entry - - mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V - if mptr.Elem().IsNil() { - mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) - } - v := mptr.Elem() // map[K]V - - // Prepare addressable doubly-indirect placeholders for the key and value types. - // See enc_new_map for why. - keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K - keybase := toStructPointer(keyptr.Addr()) // **K - - var valbase structPointer - var valptr reflect.Value - switch p.mtype.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valptr = reflect.ValueOf(&dummy) // *[]byte - valbase = toStructPointer(valptr) // *[]byte - case reflect.Ptr: - // message; valptr is **Msg; need to allocate the intermediate pointer - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valptr.Set(reflect.New(valptr.Type().Elem())) - valbase = toStructPointer(valptr) - default: - // everything else - valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V - valbase = toStructPointer(valptr.Addr()) // **V - } - - // Decode. - // This parses a restricted wire format, namely the encoding of a message - // with two fields. See enc_new_map for the format. - for o.index < oi { - // tagcode for key and value properties are always a single byte - // because they have tags 1 and 2. - tagcode := o.buf[o.index] - o.index++ - switch tagcode { - case p.mkeyprop.tagcode[0]: - if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { - return err - } - case p.mvalprop.tagcode[0]: - if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { - return err - } - default: - // TODO: Should we silently skip this instead? - return fmt.Errorf("proto: bad map data tag %d", raw[0]) - } - } - keyelem, valelem := keyptr.Elem(), valptr.Elem() - if !keyelem.IsValid() { - keyelem = reflect.Zero(p.mtype.Key()) - } - if !valelem.IsValid() { - valelem = reflect.Zero(p.mtype.Elem()) - } - - v.SetMapIndex(keyelem, valelem) - return nil -} - -// Decode a group. 
-func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - return o.unmarshalType(p.stype, p.sprop, true, bas) -} - -// Decode an embedded message. -func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { - raw, e := o.DecodeRawBytes(false) - if e != nil { - return e - } - - bas := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(bas) { - // allocate new nested message - bas = toStructPointer(reflect.New(p.stype)) - structPointer_SetStructPointer(base, p.field, bas) - } - - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := structPointer_Interface(bas, p.stype) - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, false, bas) - o.buf = obuf - o.index = oi - - return err -} - -// Decode a slice of embedded messages. -func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, false, base) -} - -// Decode a slice of embedded groups. -func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { - return o.dec_slice_struct(p, true, base) -} - -// Decode a slice of structs ([]*struct). -func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { - v := reflect.New(p.stype) - bas := toStructPointer(v) - structPointer_StructPointerSlice(base, p.field).Append(bas) - - if is_group { - err := o.unmarshalType(p.stype, p.sprop, is_group, bas) - return err - } - - raw, err := o.DecodeRawBytes(false) - if err != nil { + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) return err } - // If the object can unmarshal itself, let it. - if p.isUnmarshaler { - iv := v.Interface() - return iv.(Unmarshaler).Unmarshal(raw) - } - - obuf := o.buf - oi := o.index - o.buf = raw - o.index = 0 - - err = o.unmarshalType(p.stype, p.sprop, is_group, bas) - - o.buf = obuf - o.index = oi - + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). 
+ var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) return err } diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index bd0e3bb4c85..dea2617ced3 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -35,8 +35,14 @@ import ( "fmt" "reflect" "strings" + "sync" + "sync/atomic" ) +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -49,9 +55,202 @@ import ( // For proto2 messages, the unknown fields of message extensions are only // discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. discardLegacy(m) } +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + func discardLegacy(m Message) { v := reflect.ValueOf(m) if v.Kind() != reflect.Ptr || v.IsNil() { @@ -139,7 +338,7 @@ func discardLegacy(m Message) { // For proto2 messages, only discard unknown fields in message extensions // that have been accessed via GetExtension. - if em, ok := extendable(m); ok { + if em, err := extendable(m); err == nil { // Ignore lock since discardLegacy is not concurrency safe. emm, _ := em.extensionsRead() for _, mx := range emm { diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go index 8b84d1b22d4..c27d35f866b 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -39,7 +39,6 @@ import ( "errors" "fmt" "reflect" - "sort" ) // RequiredNotSetError is the error returned if Marshal is called with @@ -82,10 +81,6 @@ var ( const maxVarintBytes = 10 // maximum length of a varint -// maxMarshalSize is the largest allowed size of an encoded protobuf, -// since C++ and Java use signed int32s for the size. -const maxMarshalSize = 1<<31 - 1 - // EncodeVarint returns the varint encoding of x. 
// This is the format for the // int32, int64, uint32, uint64, bool, and enum @@ -119,18 +114,27 @@ func (p *Buffer) EncodeVarint(x uint64) error { // SizeVarint returns the varint encoding size of an integer. func SizeVarint(x uint64) int { - return sizeVarint(x) -} - -func sizeVarint(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 } // EncodeFixed64 writes a 64-bit integer to the Buffer. @@ -149,10 +153,6 @@ func (p *Buffer) EncodeFixed64(x uint64) error { return nil } -func sizeFixed64(x uint64) int { - return 8 -} - // EncodeFixed32 writes a 32-bit integer to the Buffer. // This is the format for the // fixed32, sfixed32, and float protocol buffer types. @@ -165,20 +165,12 @@ func (p *Buffer) EncodeFixed32(x uint64) error { return nil } -func sizeFixed32(x uint64) int { - return 4 -} - // EncodeZigzag64 writes a zigzag-encoded 64-bit integer // to the Buffer. // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) -} - -func sizeZigzag64(x uint64) int { - return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -189,10 +181,6 @@ func (p *Buffer) EncodeZigzag32(x uint64) error { return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) } -func sizeZigzag32(x uint64) int { - return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) -} - // EncodeRawBytes writes a count-delimited byte buffer to the Buffer. // This is the format used for the bytes protocol buffer // type and for embedded messages. @@ -202,11 +190,6 @@ func (p *Buffer) EncodeRawBytes(b []byte) error { return nil } -func sizeRawBytes(b []byte) int { - return sizeVarint(uint64(len(b))) + - len(b) -} - // EncodeStringBytes writes an encoded string to the Buffer. // This is the format used for the proto2 string type. func (p *Buffer) EncodeStringBytes(s string) error { @@ -215,319 +198,17 @@ func (p *Buffer) EncodeStringBytes(s string) error { return nil } -func sizeStringBytes(s string) int { - return sizeVarint(uint64(len(s))) + - len(s) -} - // Marshaler is the interface representing objects that can marshal themselves. type Marshaler interface { Marshal() ([]byte, error) } -// Marshal takes the protocol buffer -// and encodes it into the wire format, returning the data. -func Marshal(pb Message) ([]byte, error) { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - return m.Marshal() - } - p := NewBuffer(nil) - err := p.Marshal(pb) - if p.buf == nil && err == nil { - // Return a non-nil slice on success. - return []byte{}, nil - } - return p.buf, err -} - // EncodeMessage writes the protocol buffer to the Buffer, // prefixed by a varint-encoded length. 
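The SizeVarint rewrite above swaps the old counting loop for one range check per possible encoded length (1 to 10 bytes for a uint64). A standalone sketch, separate from the vendored code, that runs both formulations side by side on a few boundary values:

package main

import "fmt"

// sizeVarintLoop is the loop-based formulation the hunk removes.
func sizeVarintLoop(x uint64) (n int) {
    for {
        n++
        x >>= 7
        if x == 0 {
            return n
        }
    }
}

// sizeVarintSwitch mirrors the new range-check formulation.
func sizeVarintSwitch(x uint64) int {
    switch {
    case x < 1<<7:
        return 1
    case x < 1<<14:
        return 2
    case x < 1<<21:
        return 3
    case x < 1<<28:
        return 4
    case x < 1<<35:
        return 5
    case x < 1<<42:
        return 6
    case x < 1<<49:
        return 7
    case x < 1<<56:
        return 8
    case x < 1<<63:
        return 9
    }
    return 10
}

func main() {
    for _, x := range []uint64{0, 127, 128, 1<<21 - 1, 1 << 21, 1<<64 - 1} {
        fmt.Println(x, sizeVarintLoop(x), sizeVarintSwitch(x))
    }
}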
func (p *Buffer) EncodeMessage(pb Message) error { - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - var state errorState - err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) - } - return err -} - -// Marshal takes the protocol buffer -// and encodes it into the wire format, writing the result to the -// Buffer. -func (p *Buffer) Marshal(pb Message) error { - // Can the object marshal itself? - if m, ok := pb.(Marshaler); ok { - data, err := m.Marshal() - p.buf = append(p.buf, data...) - return err - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return ErrNil - } - if err == nil { - err = p.enc_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Encode++ // Parens are to work around a goimports bug. - } - - if len(p.buf) > maxMarshalSize { - return ErrTooLarge - } - return err -} - -// Size returns the encoded size of a protocol buffer. -func Size(pb Message) (n int) { - // Can the object marshal itself? If so, Size is slow. - // TODO: add Size to Marshaler, or add a Sizer interface. - if m, ok := pb.(Marshaler); ok { - b, _ := m.Marshal() - return len(b) - } - - t, base, err := getbase(pb) - if structPointer_IsNil(base) { - return 0 - } - if err == nil { - n = size_struct(GetProperties(t.Elem()), base) - } - - if collectStats { - (stats).Size++ // Parens are to work around a goimports bug. - } - - return -} - -// Individual type encoders. - -// Encode a bool. -func (o *Buffer) enc_bool(p *Properties, base structPointer) error { - v := *structPointer_Bool(base, p.field) - if v == nil { - return ErrNil - } - x := 0 - if *v { - x = 1 - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { - v := *structPointer_BoolVal(base, p.field) - if !v { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, 1) - return nil -} - -func size_bool(p *Properties, base structPointer) int { - v := *structPointer_Bool(base, p.field) - if v == nil { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -func size_proto3_bool(p *Properties, base structPointer) int { - v := *structPointer_BoolVal(base, p.field) - if !v && !p.oneof { - return 0 - } - return len(p.tagcode) + 1 // each bool takes exactly one byte -} - -// Encode an int32. -func (o *Buffer) enc_int32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- p.valEnc(o, uint64(x)) - return nil -} - -func size_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_int32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode a uint32. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return ErrNil - } - x := word32_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, uint64(x)) - return nil -} - -func size_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32(base, p.field) - if word32_IsNil(v) { - return 0 - } - x := word32_Get(v) - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -func size_proto3_uint32(p *Properties, base structPointer) (n int) { - v := structPointer_Word32Val(base, p.field) - x := word32Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(uint64(x)) - return -} - -// Encode an int64. -func (o *Buffer) enc_int64(p *Properties, base structPointer) error { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return ErrNil - } - x := word64_Get(v) - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, x) - return nil -} - -func size_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64(base, p.field) - if word64_IsNil(v) { - return 0 - } - x := word64_Get(v) - n += len(p.tagcode) - n += p.valSize(x) - return -} - -func size_proto3_int64(p *Properties, base structPointer) (n int) { - v := structPointer_Word64Val(base, p.field) - x := word64Val_Get(v) - if x == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += p.valSize(x) - return -} - -// Encode a string. -func (o *Buffer) enc_string(p *Properties, base structPointer) error { - v := *structPointer_String(base, p.field) - if v == nil { - return ErrNil - } - x := *v - o.buf = append(o.buf, p.tagcode...) - o.EncodeStringBytes(x) - return nil -} - -func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { - v := *structPointer_StringVal(base, p.field) - if v == "" { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(v) - return nil -} - -func size_string(p *Properties, base structPointer) (n int) { - v := *structPointer_String(base, p.field) - if v == nil { - return 0 - } - x := *v - n += len(p.tagcode) - n += sizeStringBytes(x) - return -} - -func size_proto3_string(p *Properties, base structPointer) (n int) { - v := *structPointer_StringVal(base, p.field) - if v == "" && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeStringBytes(v) - return + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) } // All protocol buffer fields are nillable, but be careful. @@ -538,825 +219,3 @@ func isNil(v reflect.Value) bool { } return false } - -// Encode a message struct. -func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { - var state errorState - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return ErrNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - return state.err - } - - o.buf = append(o.buf, p.tagcode...) - return o.enc_len_struct(p.sprop, structp, &state) -} - -func size_struct_message(p *Properties, base structPointer) int { - structp := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(structp) { - return 0 - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n0 := len(p.tagcode) - n1 := sizeRawBytes(data) - return n0 + n1 - } - - n0 := len(p.tagcode) - n1 := size_struct(p.sprop, structp) - n2 := sizeVarint(uint64(n1)) // size of encoded length - return n0 + n1 + n2 -} - -// Encode a group struct. -func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { - var state errorState - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return ErrNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - err := o.enc_struct(p.sprop, b) - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return state.err -} - -func size_struct_group(p *Properties, base structPointer) (n int) { - b := structPointer_GetStructPointer(base, p.field) - if structPointer_IsNil(b) { - return 0 - } - - n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) - n += size_struct(p.sprop, b) - n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) - return -} - -// Encode a slice of bools ([]bool). -func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - for _, x := range s { - o.buf = append(o.buf, p.tagcode...) - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_bool(p *Properties, base structPointer) int { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - return l * (len(p.tagcode) + 1) // each bool takes exactly one byte -} - -// Encode a slice of bools ([]bool) in packed format. -func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeVarint(uint64(l)) // each bool takes exactly one byte - for _, x := range s { - v := uint64(0) - if x { - v = 1 - } - p.valEnc(o, v) - } - return nil -} - -func size_slice_packed_bool(p *Properties, base structPointer) (n int) { - s := *structPointer_BoolSlice(base, p.field) - l := len(s) - if l == 0 { - return 0 - } - n += len(p.tagcode) - n += sizeVarint(uint64(l)) - n += l // each bool takes exactly one byte - return -} - -// Encode a slice of bytes ([]byte). -func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if s == nil { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 { - return ErrNil - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(s) - return nil -} - -func size_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if s == nil && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { - s := *structPointer_Bytes(base, p.field) - if len(s) == 0 && !p.oneof { - return 0 - } - n += len(p.tagcode) - n += sizeRawBytes(s) - return -} - -// Encode a slice of int32s ([]int32). -func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of int32s ([]int32) in packed format. -func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - p.valEnc(buf, uint64(x)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - x := int32(s.Index(i)) // permit sign extension to use full 64-bit range - bufSize += p.valSize(uint64(x)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of uint32s ([]uint32). -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- x := s.Index(i) - p.valEnc(o, uint64(x)) - } - return nil -} - -func size_slice_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - x := s.Index(i) - n += p.valSize(uint64(x)) - } - return -} - -// Encode a slice of uint32s ([]uint32) in packed format. -// Exactly the same as int32, except for no sign extension. -func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, uint64(s.Index(i))) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { - s := structPointer_Word32Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(uint64(s.Index(i))) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of int64s ([]int64). -func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - p.valEnc(o, s.Index(i)) - } - return nil -} - -func size_slice_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - for i := 0; i < l; i++ { - n += len(p.tagcode) - n += p.valSize(s.Index(i)) - } - return -} - -// Encode a slice of int64s ([]int64) in packed format. -func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return ErrNil - } - // TODO: Reuse a Buffer. - buf := NewBuffer(nil) - for i := 0; i < l; i++ { - p.valEnc(buf, s.Index(i)) - } - - o.buf = append(o.buf, p.tagcode...) - o.EncodeVarint(uint64(len(buf.buf))) - o.buf = append(o.buf, buf.buf...) - return nil -} - -func size_slice_packed_int64(p *Properties, base structPointer) (n int) { - s := structPointer_Word64Slice(base, p.field) - l := s.Len() - if l == 0 { - return 0 - } - var bufSize int - for i := 0; i < l; i++ { - bufSize += p.valSize(s.Index(i)) - } - - n += len(p.tagcode) - n += sizeVarint(uint64(bufSize)) - n += bufSize - return -} - -// Encode a slice of slice of bytes ([][]byte). -func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return ErrNil - } - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(ss[i]) - } - return nil -} - -func size_slice_slice_byte(p *Properties, base structPointer) (n int) { - ss := *structPointer_BytesSlice(base, p.field) - l := len(ss) - if l == 0 { - return 0 - } - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeRawBytes(ss[i]) - } - return -} - -// Encode a slice of strings ([]string). -func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - for i := 0; i < l; i++ { - o.buf = append(o.buf, p.tagcode...) 
- o.EncodeStringBytes(ss[i]) - } - return nil -} - -func size_slice_string(p *Properties, base structPointer) (n int) { - ss := *structPointer_StringSlice(base, p.field) - l := len(ss) - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - n += sizeStringBytes(ss[i]) - } - return -} - -// Encode a slice of message structs ([]*struct). -func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return errRepeatedHasNil - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, err := m.Marshal() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - o.buf = append(o.buf, p.tagcode...) - o.EncodeRawBytes(data) - continue - } - - o.buf = append(o.buf, p.tagcode...) - err := o.enc_len_struct(p.sprop, structp, &state) - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - } - return state.err -} - -func size_slice_struct_message(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - n += l * len(p.tagcode) - for i := 0; i < l; i++ { - structp := s.Index(i) - if structPointer_IsNil(structp) { - return // return the size up to this point - } - - // Can the object marshal itself? - if p.isMarshaler { - m := structPointer_Interface(structp, p.stype).(Marshaler) - data, _ := m.Marshal() - n += sizeRawBytes(data) - continue - } - - n0 := size_struct(p.sprop, structp) - n1 := sizeVarint(uint64(n0)) // size of encoded length - n += n0 + n1 - } - return -} - -// Encode a slice of group structs ([]*struct). -func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { - var state errorState - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return errRepeatedHasNil - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) - - err := o.enc_struct(p.sprop, b) - - if err != nil && !state.shouldContinue(err, nil) { - if err == ErrNil { - return errRepeatedHasNil - } - return err - } - - o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) - } - return state.err -} - -func size_slice_struct_group(p *Properties, base structPointer) (n int) { - s := structPointer_StructPointerSlice(base, p.field) - l := s.Len() - - n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) - n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) - for i := 0; i < l; i++ { - b := s.Index(i) - if structPointer_IsNil(b) { - return // return size up to this point - } - - n += size_struct(p.sprop, b) - } - return -} - -// Encode an extension map. 
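The deletions in this file retire the per-type reflection encoders and sizers (the enc_* and size_* functions); their work moves to the table-driven marshalInfo machinery referenced later in this diff, and the wire format itself does not change. For orientation, the packed repeated encoding that enc_slice_packed_int32 used to emit can still be assembled by hand from the Buffer primitives that remain exported; a sketch for a hypothetical field number 4:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    // Packed repeated varints: one key (field 4, wire type 2) followed by a
    // length-delimited payload holding every element back to back.
    var payload proto.Buffer
    for _, v := range []int32{3, 270, 86942} {
        payload.EncodeVarint(uint64(v))
    }

    var b proto.Buffer
    b.EncodeVarint(uint64(4<<3 | 2))  // key: field number 4, wire type 2
    b.EncodeRawBytes(payload.Bytes()) // varint length prefix + packed elements

    fmt.Printf("% x\n", b.Bytes()) // 22 06 03 8e 02 9e a7 05
}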
-func (o *Buffer) enc_map(p *Properties, base structPointer) error { - exts := structPointer_ExtMap(base, p.field) - if err := encodeExtensionsMap(*exts); err != nil { - return err - } - - return o.enc_map_body(*exts) -} - -func (o *Buffer) enc_exts(p *Properties, base structPointer) error { - exts := structPointer_Extensions(base, p.field) - - v, mu := exts.extensionsRead() - if v == nil { - return nil - } - - mu.Lock() - defer mu.Unlock() - if err := encodeExtensionsMap(v); err != nil { - return err - } - - return o.enc_map_body(v) -} - -func (o *Buffer) enc_map_body(v map[int32]Extension) error { - // Fast-path for common cases: zero or one extensions. - if len(v) <= 1 { - for _, e := range v { - o.buf = append(o.buf, e.enc...) - } - return nil - } - - // Sort keys to provide a deterministic encoding. - keys := make([]int, 0, len(v)) - for k := range v { - keys = append(keys, int(k)) - } - sort.Ints(keys) - - for _, k := range keys { - o.buf = append(o.buf, v[int32(k)].enc...) - } - return nil -} - -func size_map(p *Properties, base structPointer) int { - v := structPointer_ExtMap(base, p.field) - return extensionsMapSize(*v) -} - -func size_exts(p *Properties, base structPointer) int { - v := structPointer_Extensions(base, p.field) - return extensionsSize(v) -} - -// Encode a map field. -func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { - var state errorState // XXX: or do we need to plumb this through? - - /* - A map defined as - map map_field = N; - is encoded in the same way as - message MapFieldEntry { - key_type key = 1; - value_type value = 2; - } - repeated MapFieldEntry map_field = N; - */ - - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - if v.Len() == 0 { - return nil - } - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - enc := func() error { - if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { - return err - } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { - return err - } - return nil - } - - // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - - keycopy.Set(key) - valcopy.Set(val) - - o.buf = append(o.buf, p.tagcode...) - if err := o.enc_len_thing(enc, &state); err != nil { - return err - } - } - return nil -} - -func size_new_map(p *Properties, base structPointer) int { - v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V - - keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) - - n := 0 - for _, key := range v.MapKeys() { - val := v.MapIndex(key) - keycopy.Set(key) - valcopy.Set(val) - - // Tag codes for key and val are the responsibility of the sub-sizer. - keysize := p.mkeyprop.size(p.mkeyprop, keybase) - valsize := p.mvalprop.size(p.mvalprop, valbase) - entry := keysize + valsize - // Add on tag code and length of map entry itself. - n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry - } - return n -} - -// mapEncodeScratch returns a new reflect.Value matching the map's value type, -// and a structPointer suitable for passing to an encoder or sizer. -func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { - // Prepare addressable doubly-indirect placeholders for the key and value types. - // This is needed because the element-type encoders expect **T, but the map iteration produces T. 
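enc_new_map above relies on the equivalence quoted in its comment: on the wire, a map field is just a repeated nested entry message whose key is field 1 and whose value is field 2. A standalone sketch of one entry of a hypothetical map<string, int32> field numbered 3, again built only from the exported Buffer primitives:

package main

import (
    "fmt"

    "github.com/golang/protobuf/proto"
)

func main() {
    // Entry for key "hits" -> 42, shaped like
    // message Entry { string key = 1; int32 value = 2; }.
    var entry proto.Buffer
    entry.EncodeVarint(uint64(1<<3 | 2)) // key: field 1, wire type 2
    entry.EncodeStringBytes("hits")
    entry.EncodeVarint(uint64(2 << 3)) // value: field 2, wire type 0
    entry.EncodeVarint(42)

    var b proto.Buffer
    b.EncodeVarint(uint64(3<<3 | 2)) // map field 3, wire type 2
    b.EncodeRawBytes(entry.Bytes())

    fmt.Printf("% x\n", b.Bytes()) // 1a 08 0a 04 68 69 74 73 10 2a
}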
- - keycopy = reflect.New(mapType.Key()).Elem() // addressable K - keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K - keyptr.Set(keycopy.Addr()) // - keybase = toStructPointer(keyptr.Addr()) // **K - - // Value types are more varied and require special handling. - switch mapType.Elem().Kind() { - case reflect.Slice: - // []byte - var dummy []byte - valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte - valbase = toStructPointer(valcopy.Addr()) - case reflect.Ptr: - // message; the generated field type is map[K]*Msg (so V is *Msg), - // so we only need one level of indirection. - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valbase = toStructPointer(valcopy.Addr()) - default: - // everything else - valcopy = reflect.New(mapType.Elem()).Elem() // addressable V - valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V - valptr.Set(valcopy.Addr()) // - valbase = toStructPointer(valptr.Addr()) // **V - } - return -} - -// Encode a struct. -func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { - var state errorState - // Encode fields in tag order so that decoders may use optimizations - // that depend on the ordering. - // https://developers.google.com/protocol-buffers/docs/encoding#order - for _, i := range prop.order { - p := prop.Prop[i] - if p.enc != nil { - err := p.enc(o, p, base) - if err != nil { - if err == ErrNil { - if p.Required && state.err == nil { - state.err = &RequiredNotSetError{p.Name} - } - } else if err == errRepeatedHasNil { - // Give more context to nil values in repeated fields. - return errors.New("repeated field " + p.OrigName + " has nil element") - } else if !state.shouldContinue(err, p) { - return err - } - } - if len(o.buf) > maxMarshalSize { - return ErrTooLarge - } - } - } - - // Do oneof fields. - if prop.oneofMarshaler != nil { - m := structPointer_Interface(base, prop.stype).(Message) - if err := prop.oneofMarshaler(m, o); err == ErrNil { - return errOneofHasNil - } else if err != nil { - return err - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - if len(o.buf)+len(v) > maxMarshalSize { - return ErrTooLarge - } - if len(v) > 0 { - o.buf = append(o.buf, v...) - } - } - - return state.err -} - -func size_struct(prop *StructProperties, base structPointer) (n int) { - for _, i := range prop.order { - p := prop.Prop[i] - if p.size != nil { - n += p.size(p, base) - } - } - - // Add unrecognized fields at the end. - if prop.unrecField.IsValid() { - v := *structPointer_Bytes(base, prop.unrecField) - n += len(v) - } - - // Factor in any oneof fields. - if prop.oneofSizer != nil { - m := structPointer_Interface(base, prop.stype).(Message) - n += prop.oneofSizer(m) - } - - return -} - -var zeroes [20]byte // longer than any conceivable sizeVarint - -// Encode a struct, preceded by its encoded length (as a varint). -func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { - return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) -} - -// Encode something, preceded by its encoded length (as a varint). 
-func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { - iLen := len(o.buf) - o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length - iMsg := len(o.buf) - err := enc() - if err != nil && !state.shouldContinue(err, nil) { - return err - } - lMsg := len(o.buf) - iMsg - lLen := sizeVarint(uint64(lMsg)) - switch x := lLen - (iMsg - iLen); { - case x > 0: // actual length is x bytes larger than the space we reserved - // Move msg x bytes right. - o.buf = append(o.buf, zeroes[:x]...) - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - case x < 0: // actual length is x bytes smaller than the space we reserved - // Move msg x bytes left. - copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) - o.buf = o.buf[:len(o.buf)+x] // x is negative - } - // Encode the length in the reserved space. - o.buf = o.buf[:iLen] - o.EncodeVarint(uint64(lMsg)) - o.buf = o.buf[:len(o.buf)+lMsg] - return state.err -} - -// errorState maintains the first error that occurs and updates that error -// with additional context. -type errorState struct { - err error -} - -// shouldContinue reports whether encoding should continue upon encountering the -// given error. If the error is RequiredNotSetError, shouldContinue returns true -// and, if this is the first appearance of that error, remembers it for future -// reporting. -// -// If prop is not nil, it may update any error with additional context about the -// field with the error. -func (s *errorState) shouldContinue(err error, prop *Properties) bool { - // Ignore unset required fields. - reqNotSet, ok := err.(*RequiredNotSetError) - if !ok { - return false - } - if s.err == nil { - if prop != nil { - err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} - } - s.err = err - } - return true -} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go index 2ed1cf59666..d4db5a1c145 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -109,15 +109,6 @@ func equalStruct(v1, v2 reflect.Value) bool { // set/unset mismatch return false } - b1, ok := f1.Interface().(raw) - if ok { - b2 := f2.Interface().(raw) - // RawMessage - if !bytes.Equal(b1.Bytes(), b2.Bytes()) { - return false - } - continue - } f1, f2 = f1.Elem(), f2.Elem() } if !equalAny(f1, f2, sprop.Prop[i]) { @@ -146,11 +137,7 @@ func equalStruct(v1, v2 reflect.Value) bool { u1 := uf.Bytes() u2 := v2.FieldByName("XXX_unrecognized").Bytes() - if !bytes.Equal(u1, u2) { - return false - } - - return true + return bytes.Equal(u1, u2) } // v1 and v2 are known to have the same type. @@ -261,6 +248,15 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { m1, m2 := e1.value, e2.value + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + if m1 != nil && m2 != nil { // Both are unencoded. if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { @@ -276,8 +272,12 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { desc = m[extNum] } if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. 
log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) - continue + return false } var err error if m1 == nil { diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index eaad2183126..816a3b9d6c0 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -38,6 +38,7 @@ package proto import ( "errors" "fmt" + "io" "reflect" "strconv" "sync" @@ -91,14 +92,29 @@ func (n notLocker) Unlock() {} // extendable returns the extendableProto interface for the given generated proto message. // If the proto message has the old extension format, it returns a wrapper that implements // the extendableProto interface. -func extendable(p interface{}) (extendableProto, bool) { - if ep, ok := p.(extendableProto); ok { - return ep, ok - } - if ep, ok := p.(extendableProtoV1); ok { - return extensionAdapter{ep}, ok +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil } - return nil, false + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() } // XXX_InternalExtensions is an internal representation of proto extensions. @@ -143,9 +159,6 @@ func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Loc return e.p.extensionMap, &e.p.mu } -var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() -var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() - // ExtensionDesc represents an extension specification. // Used in generated code from the protocol compiler. type ExtensionDesc struct { @@ -179,8 +192,8 @@ type Extension struct { // SetRawExtension is for testing only. func SetRawExtension(base Message, id int32, b []byte) { - epb, ok := extendable(base) - if !ok { + epb, err := extendable(base) + if err != nil { return } extmap := epb.extensionsWrite() @@ -205,7 +218,7 @@ func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { pbi = ea.extendableProtoV1 } if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) } // Check the range. if !isExtensionField(pb, extension.Field) { @@ -250,85 +263,11 @@ func extensionProperties(ed *ExtensionDesc) *Properties { return prop } -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensions(e *XXX_InternalExtensions) error { - m, mu := e.extensionsRead() - if m == nil { - return nil // fast path - } - mu.Lock() - defer mu.Unlock() - return encodeExtensionsMap(m) -} - -// encode encodes any unmarshaled (unencoded) extensions in e. -func encodeExtensionsMap(m map[int32]Extension) error { - for k, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. 
- continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - p := NewBuffer(nil) - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - if err := props.enc(p, props, toStructPointer(x)); err != nil { - return err - } - e.enc = p.buf - m[k] = e - } - return nil -} - -func extensionsSize(e *XXX_InternalExtensions) (n int) { - m, mu := e.extensionsRead() - if m == nil { - return 0 - } - mu.Lock() - defer mu.Unlock() - return extensionsMapSize(m) -} - -func extensionsMapSize(m map[int32]Extension) (n int) { - for _, e := range m { - if e.value == nil || e.desc == nil { - // Extension is only in its encoded form. - n += len(e.enc) - continue - } - - // We don't skip extensions that have an encoded form set, - // because the extension value may have been mutated after - // the last time this function was called. - - et := reflect.TypeOf(e.desc.ExtensionType) - props := extensionProperties(e.desc) - - // If e.value has type T, the encoder expects a *struct{ X T }. - // Pass a *T with a zero field and hope it all works out. - x := reflect.New(et) - x.Elem().Set(reflect.ValueOf(e.value)) - n += props.size(props, toStructPointer(x)) - } - return -} - // HasExtension returns whether the given extension is present in pb. func HasExtension(pb Message, extension *ExtensionDesc) bool { // TODO: Check types, field numbers, etc.? - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return false } extmap, mu := epb.extensionsRead() @@ -336,15 +275,15 @@ func HasExtension(pb Message, extension *ExtensionDesc) bool { return false } mu.Lock() - _, ok = extmap[extension.Field] + _, ok := extmap[extension.Field] mu.Unlock() return ok } // ClearExtension removes the given extension from pb. func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } // TODO: Check types, field numbers, etc.? @@ -352,16 +291,26 @@ func ClearExtension(pb Message, extension *ExtensionDesc) { delete(extmap, extension.Field) } -// GetExtension parses and returns the given extension of pb. -// If the extension is not present and has no default value it returns ErrMissingExtension. +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. 
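The expanded GetExtension contract above separates type-complete descriptors, which are decoded into a Go value (falling back to the default or ErrMissingExtension), from descriptors that carry only a field number, for which the raw encoded bytes come back. Typical proto2 usage with a type-complete descriptor looks roughly like the fragment below; pb.Outer and pb.E_Owner are hypothetical generated names, not defined anywhere in this diff:

// pb.Outer: an extendable proto2 message; pb.E_Owner: its *ExtensionDesc
// for an optional string extension (both hypothetical).
msg := &pb.Outer{}

if err := proto.SetExtension(msg, pb.E_Owner, proto.String("alice")); err != nil {
    // handle the error
}

if proto.HasExtension(msg, pb.E_Owner) {
    v, err := proto.GetExtension(msg, pb.E_Owner)
    if err != nil {
        // handle the error
    }
    fmt.Println(*v.(*string)) // "alice"
}

proto.ClearExtension(msg, pb.E_Owner)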
func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } } emap, mu := epb.extensionsRead() @@ -388,6 +337,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { return e.value, nil } + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + v, err := decodeExtension(e.enc, extension) if err != nil { return nil, err @@ -405,6 +359,11 @@ func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { // defaultExtensionValue returns the default value for extension. // If no default for an extension is defined ErrMissingExtension is returned. func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + t := reflect.TypeOf(extension.ExtensionType) props := extensionProperties(extension) @@ -439,31 +398,28 @@ func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { // decodeExtension decodes an extension encoded in b. func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - o := NewBuffer(b) - t := reflect.TypeOf(extension.ExtensionType) - - props := extensionProperties(extension) + unmarshal := typeUnmarshaler(t, extension.Tag) // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate a "field" to store the pointer/slice itself; the - // pointer/slice will be stored here. We pass - // the address of this field to props.dec. - // This passes a zero field and a *t and lets props.dec - // interpret it as a *struct{ x t }. + // Allocate space to store the pointer/slice. value := reflect.New(t).Elem() + var err error for { - // Discard wire type and field number varint. It isn't needed. - if _, err := o.DecodeVarint(); err != nil { - return nil, err + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF } + b = b[n:] + wire := int(x) & 7 - if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { return nil, err } - if o.index >= len(o.buf) { + if len(b) == 0 { break } } @@ -473,9 +429,9 @@ func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { // GetExtensions returns a slice of the extensions present in pb that are also listed in es. // The returned slice has the same length as es; missing extensions will appear as nil elements. func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, ok := extendable(pb) - if !ok { - return nil, errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return nil, err } extensions = make([]interface{}, len(es)) for i, e := range es { @@ -494,9 +450,9 @@ func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, e // For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing // just the Field field, which defines the extension's field number. 
func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, ok := extendable(pb) - if !ok { - return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + epb, err := extendable(pb) + if err != nil { + return nil, err } registeredExtensions := RegisteredExtensions(pb) @@ -523,9 +479,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { // SetExtension sets the specified extension of pb to the specified value. func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, ok := extendable(pb) - if !ok { - return errors.New("proto: not an extendable proto") + epb, err := extendable(pb) + if err != nil { + return err } if err := checkExtensionTypes(epb, extension); err != nil { return err @@ -550,8 +506,8 @@ func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error // ClearAllExtensions clears all extensions from pb. func ClearAllExtensions(pb Message) { - epb, ok := extendable(pb) - if !ok { + epb, err := extendable(pb) + if err != nil { return } m := epb.extensionsWrite() diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go index 1c225504a01..0e2191b8ada 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -265,6 +265,7 @@ package proto import ( "encoding/json" + "errors" "fmt" "log" "reflect" @@ -273,6 +274,8 @@ import ( "sync" ) +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + // Message is implemented by generated protocol buffer messages. type Message interface { Reset() @@ -309,16 +312,7 @@ type Buffer struct { buf []byte // encode/decode byte stream index int // read point - // pools of basic types to amortize allocation. - bools []bool - uint32s []uint32 - uint64s []uint64 - - // extra pools, only used with pointer_reflect.go - int32s []int32 - int64s []int64 - float32s []float32 - float64s []float64 + deterministic bool } // NewBuffer allocates a new Buffer and initializes its internal data to @@ -343,6 +337,30 @@ func (p *Buffer) SetBuf(s []byte) { // Bytes returns the contents of the Buffer. func (p *Buffer) Bytes() []byte { return p.buf } +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + /* * Helper routines for simplifying the creation of optional fields of basic type. 
*/ @@ -831,22 +849,12 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes return sf, false, nil } +// mapKeys returns a sort.Interface to be used for sorting the map keys. // Map fields may have key types of non-float scalars, strings and enums. -// The easiest way to sort them in some deterministic order is to use fmt. -// If this turns out to be inefficient we can always consider other options, -// such as doing a Schwartzian transform. - func mapKeys(vs []reflect.Value) sort.Interface { - s := mapKeySorter{ - vs: vs, - // default Less function: textual comparison - less: func(a, b reflect.Value) bool { - return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) - }, - } + s := mapKeySorter{vs: vs} - // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; - // numeric keys are sorted numerically. + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. if len(vs) == 0 { return s } @@ -855,6 +863,12 @@ func mapKeys(vs []reflect.Value) sort.Interface { s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } case reflect.Uint32, reflect.Uint64: s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) } return s @@ -895,3 +909,13 @@ const ProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. const ProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go index fd982decd66..3b6ca41d5e5 100644 --- a/vendor/github.com/golang/protobuf/proto/message_set.go +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -42,6 +42,7 @@ import ( "fmt" "reflect" "sort" + "sync" ) // errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. @@ -94,10 +95,7 @@ func (ms *messageSet) find(pb Message) *_MessageSet_Item { } func (ms *messageSet) Has(pb Message) bool { - if ms.find(pb) != nil { - return true - } - return false + return ms.find(pb) != nil } func (ms *messageSet) Unmarshal(pb Message) error { @@ -150,46 +148,42 @@ func skipVarint(buf []byte) []byte { // MarshalMessageSet encodes the extension map represented by m in the message set wire format. // It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. func MarshalMessageSet(exts interface{}) ([]byte, error) { - var m map[int32]Extension + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. 
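marshalMessageSet below threads the new deterministic flag into the table-driven marshaler; ordinary callers opt in through Buffer.SetDeterministic, added in lib.go earlier in this diff, which sorts map entries by key while marshaling. A small fragment, where msg is any proto.Message (messages with map fields show the effect best):

var buf proto.Buffer
buf.SetDeterministic(true) // map entries are emitted sorted by key
if err := buf.Marshal(msg); err != nil {
    // handle the error
}
out := buf.Bytes() // byte-for-byte stable for equal messages within one binary

As the SetDeterministic comment notes, this is not a canonical form across languages or library versions; it only stabilizes output for a given binary.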
+func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { switch exts := exts.(type) { case *XXX_InternalExtensions: - if err := encodeExtensions(exts); err != nil { - return nil, err - } - m, _ = exts.extensionsRead() + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + case map[int32]Extension: - if err := encodeExtensionsMap(exts); err != nil { - return nil, err + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, } - m = exts + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + default: return nil, errors.New("proto: not an extension map") } - - // Sort extension IDs to provide a deterministic encoding. - // See also enc_map in encode.go. - ids := make([]int, 0, len(m)) - for id := range m { - ids = append(ids, int(id)) - } - sort.Ints(ids) - - ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} - for _, id := range ids { - e := m[int32(id)] - // Remove the wire type and field number varint, as well as the length varint. - msg := skipVarint(skipVarint(e.enc)) - - ms.Item = append(ms.Item, &_MessageSet_Item{ - TypeId: Int32(int32(id)), - Message: msg, - }) - } - return Marshal(ms) } // UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. -// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. func UnmarshalMessageSet(buf []byte, exts interface{}) error { var m map[int32]Extension switch exts := exts.(type) { @@ -235,7 +229,15 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { var m map[int32]Extension switch exts := exts.(type) { case *XXX_InternalExtensions: - m, _ = exts.extensionsRead() + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } case map[int32]Extension: m = exts default: @@ -253,15 +255,16 @@ func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { for i, id := range ids { ext := m[id] - if i > 0 { - b.WriteByte(',') - } - msd, ok := messageSetMap[id] if !ok { // Unknown type; we can't render it, so skip it. continue } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) x := ext.value diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go index fb512e2e16d..b6cad90834b 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build appengine js +// +build purego appengine js // This file contains an implementation of proto field accesses using package reflect. 
// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can @@ -38,32 +38,13 @@ package proto import ( - "math" "reflect" + "sync" ) -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} +const unsafeAllowed = false -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by the sequence of field indices // passed to reflect's FieldByIndex. type field []int @@ -76,409 +57,301 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. var invalidField = field(nil) +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { return f != nil } -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value } -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} } -// Bool returns the address of a *bool field in the struct. 
-func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) +func (p pointer) isNil() bool { + return p.v.IsNil() } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) } - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) } - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) } -// Extensions returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return structPointer_ifield(p, f).(*XXX_InternalExtensions) -} +var int32ptr = reflect.TypeOf((*int32)(nil)) -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) } -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) } - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) } - -// GetStructPointer reads a *struct field in the struct. 
-func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. + slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) } - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) } -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) } - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) } - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) } - -// Set sets p to point at a newly allocated word with bits set to x. 
-func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) } - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) } - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) } - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) } - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) } - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) } - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) } - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
-type word32Slice struct { - v reflect.Value +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) } - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) } - -func (p word32Slice) Len() int { - return p.v.Len() +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) } - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) } - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) } - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value +func (p pointer) toString() *string { + return p.v.Interface().(*string) } - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) } - -func word64_IsNil(p word64) bool { - return p.v.IsNil() +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) } - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) } - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) } 
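Aside (illustration only, not part of the vendored files): the purego pointer type introduced above is just a reflect.Value of pointer kind, and its offset/toInt64-style accessors reduce to FieldByIndex plus Addr plus a type assertion. A minimal standalone sketch of that idea, with a hypothetical msg struct standing in for a generated message:

package main

import (
	"fmt"
	"reflect"
)

type msg struct {
	Count *int64 // stand-in for a generated optional int64 field
}

func main() {
	n := int64(7)
	m := &msg{Count: &n}

	// Roughly pointer{v: reflect.ValueOf(m)} in pointer_reflect.go terms.
	pv := reflect.ValueOf(m)

	// offset(field{0}): take the address of the first struct field,
	// still without touching package unsafe.
	fieldAddr := pv.Elem().FieldByIndex([]int{0}).Addr()

	// toInt64Ptr()-style access: recover a typed **int64 via an interface assertion.
	pp := fieldAddr.Interface().(**int64)
	fmt.Println(**pp) // prints 7
}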
-// word64Val is like word32Val but for 64-bit values. -type word64Val struct { - v reflect.Value +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s } -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) return } - panic("unreachable") + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) } -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} } - panic("unreachable") + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v } -type word64Slice struct { - v reflect.Value +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func (p word64Slice) Len() int { - return p.v.Len() +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p } - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v } +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go index 6b5567d47cd..d55a335d945 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -29,7 +29,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine,!js +// +build !purego,!appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -37,38 +37,13 @@ package proto import ( "reflect" + "sync/atomic" "unsafe" ) -// NOTE: These type_Foo functions would more idiomatically be methods, -// but Go does not allow methods on pointer types, and we must preserve -// some pointer type for the garbage collector. We use these -// funcs with clunky names as our poor approximation to methods. -// -// An alternative would be -// type structPointer struct { p unsafe.Pointer } -// but that does not registerize as well. - -// A structPointer is a pointer to a struct. -type structPointer unsafe.Pointer - -// toStructPointer returns a structPointer equivalent to the given reflect value. -func toStructPointer(v reflect.Value) structPointer { - return structPointer(unsafe.Pointer(v.Pointer())) -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p == nil -} - -// Interface returns the struct pointer, assumed to have element type t, -// as an interface value. 
-func structPointer_Interface(p structPointer, t reflect.Type) interface{} { - return reflect.NewAt(t, unsafe.Pointer(p)).Interface() -} +const unsafeAllowed = true -// A field identifies a field in a struct, accessible from a structPointer. +// A field identifies a field in a struct, accessible from a pointer. // In this implementation, a field is identified by its byte offset from the start of the struct. type field uintptr @@ -80,191 +55,254 @@ func toField(f *reflect.StructField) field { // invalidField is an invalid field identifier. const invalidField = ^field(0) +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + // IsValid reports whether the field identifier is valid. func (f field) IsValid() bool { - return f != ^field(0) + return f != invalidField } -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer } -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) -} +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} } -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// offset converts from a pointer to a structure to a pointer to +// one of its fields. 
+func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} } -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) isNil() bool { + return p.p == nil } -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) } - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { - return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) } - -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) } - -// NewAt returns the reflect.Value for a pointer to a field in the struct. -func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { - return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) } -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) } - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v } -// StructPointerSlice the address of a []*struct field in the struct. -func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { - return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) } -// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). -type structPointerSlice []structPointer - -func (v *structPointerSlice) Len() int { return len(*v) } -func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } -func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } - -// A word32 is the address of a "pointer to 32-bit value" field. -type word32 **uint32 - -// IsNil reports whether *v is nil. -func word32_IsNil(p word32) bool { - return *p == nil +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. 
+// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v } -// Set sets *v to point at a newly allocated word set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - *p = &o.uint32s[0] - o.uint32s = o.uint32s[1:] +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? +func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) } -// Get gets the value pointed at by *v. -func word32_Get(p word32) uint32 { - return **p +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) } - -// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) } - -// A word32Val is the address of a 32-bit value field. -type word32Val *uint32 - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - *p = x +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) } - -// Get gets the value pointed at by p. -func word32Val_Get(p word32Val) uint32 { - return *p +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) } - -// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) } - -// A word32Slice is a slice of 32-bit values. -type word32Slice []uint32 - -func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } -func (v *word32Slice) Len() int { return len(*v) } -func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } - -// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) *word32Slice { - return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) } - -// word64 is like word32 but for 64-bit values. 
-type word64 **uint64 - -func word64_Set(p word64, o *Buffer, x uint64) { - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - *p = &o.uint64s[0] - o.uint64s = o.uint64s[1:] +func (p pointer) toBool() *bool { + return (*bool)(p.p) } - -func word64_IsNil(p word64) bool { - return *p == nil +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) } - -func word64_Get(p word64) uint64 { - return **p +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) } -func structPointer_Word64(p structPointer, f field) word64 { - return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) } -// word64Val is like word32Val but for 64-bit values. -type word64Val *uint64 +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - *p = x +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} } -func word64Val_Get(p word64Val) uint64 { - return *p +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p } -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) } -// word64Slice is like word32Slice but for 64-bit values. -type word64Slice []uint64 +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. 
+ return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} -func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } -func (v *word64Slice) Len() int { return len(*v) } -func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} -func structPointer_Word64Slice(p structPointer, f field) *word64Slice { - return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index ec2289c0058..f710adab092 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -58,42 +58,6 @@ const ( WireFixed32 = 5 ) -const startSize = 10 // initial slice/string sizes - -// Encoders are defined in encode.go -// An encoder outputs the full representation of a field, including its -// tag and encoder type. -type encoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueEncoder encodes a single integer in a particular encoding. -type valueEncoder func(o *Buffer, x uint64) error - -// Sizers are defined in encode.go -// A sizer returns the encoded size of a field, including its tag and encoder -// type. -type sizer func(prop *Properties, base structPointer) int - -// A valueSizer returns the encoded size of a single integer in a particular -// encoding. -type valueSizer func(x uint64) int - -// Decoders are defined in decode.go -// A decoder creates a value from its wire representation. -// Unrecognized subelements are saved in unrec. -type decoder func(p *Buffer, prop *Properties, base structPointer) error - -// A valueDecoder decodes a single integer in a particular encoding. -type valueDecoder func(o *Buffer) (x uint64, err error) - -// A oneofMarshaler does the marshaling for all oneof fields in a message. -type oneofMarshaler func(Message, *Buffer) error - -// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. -type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) - -// A oneofSizer does the sizing for all oneof fields in a message. 
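Aside (illustration only, not part of the vendored files): on the unsafe build, lazily computed *marshalInfo-style pointers are cached by reinterpreting a **T as *unsafe.Pointer and using atomic.LoadPointer/StorePointer, whereas the purego build earlier in this patch emulates the same with a global sync.Mutex. A standalone sketch of the unsafe variant, with hypothetical names:

package main

import (
	"fmt"
	"sync/atomic"
	"unsafe"
)

type info struct{ fields int } // stand-in for marshalInfo/unmarshalInfo

// cached is a stand-in for a cache slot such as a per-message marshal-info field.
var cached *info

func loadInfo(p **info) *info {
	return (*info)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
}

func storeInfo(p **info, v *info) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
}

func main() {
	if loadInfo(&cached) == nil {
		// First use: compute once, publish atomically so concurrent readers
		// either see nil (and recompute) or a fully written *info.
		storeInfo(&cached, &info{fields: 3})
	}
	fmt.Println(loadInfo(&cached).fields)
}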
-type oneofSizer func(Message) int - // tagMap is an optimization over map[int]int for typical protocol buffer // use-cases. Encoded protocol buffers are often in tag order with small tag // numbers. @@ -140,13 +104,6 @@ type StructProperties struct { decoderTags tagMap // map from proto tag to struct field number decoderOrigNames map[string]int // map from original name to struct field number order []int // list of struct field numbers in tag order - unrecField field // field id of the XXX_unrecognized []byte field - extendable bool // is this an extendable proto - - oneofMarshaler oneofMarshaler - oneofUnmarshaler oneofUnmarshaler - oneofSizer oneofSizer - stype reflect.Type // OneofTypes contains information about the oneof fields in this message. // It is keyed by the original name of a field. @@ -187,36 +144,19 @@ type Properties struct { Default string // default value HasDefault bool // whether an explicit default was provided - def_uint64 uint64 - - enc encoder - valEnc valueEncoder // set for bool and numeric types only - field field - tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) - tagbuf [8]byte - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only - isMarshaler bool - isUnmarshaler bool + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only mtype reflect.Type // set for map types only mkeyprop *Properties // set for map types only mvalprop *Properties // set for map types only - - size sizer - valSize valueSizer // set for bool and numeric types only - - dec decoder - valDec valueDecoder // set for bool and numeric types only - - // If this is a packable field, this will be the decoder for the packed version of the field. - packedDec decoder } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s = "," + s += "," s += strconv.Itoa(p.Tag) if p.Required { s += ",req" @@ -262,29 +202,14 @@ func (p *Properties) Parse(s string) { switch p.Wire { case "varint": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeVarint - p.valDec = (*Buffer).DecodeVarint - p.valSize = sizeVarint case "fixed32": p.WireType = WireFixed32 - p.valEnc = (*Buffer).EncodeFixed32 - p.valDec = (*Buffer).DecodeFixed32 - p.valSize = sizeFixed32 case "fixed64": p.WireType = WireFixed64 - p.valEnc = (*Buffer).EncodeFixed64 - p.valDec = (*Buffer).DecodeFixed64 - p.valSize = sizeFixed64 case "zigzag32": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag32 - p.valDec = (*Buffer).DecodeZigzag32 - p.valSize = sizeZigzag32 case "zigzag64": p.WireType = WireVarint - p.valEnc = (*Buffer).EncodeZigzag64 - p.valDec = (*Buffer).DecodeZigzag64 - p.valSize = sizeZigzag64 case "bytes", "group": p.WireType = WireBytes // no numeric converter for non-numeric types @@ -299,6 +224,7 @@ func (p *Properties) Parse(s string) { return } +outer: for i := 2; i < len(fields); i++ { f := fields[i] switch { @@ -326,229 +252,28 @@ func (p *Properties) Parse(s string) { if i+1 < len(fields) { // Commas aren't escaped, and def is always last. p.Default += "," + strings.Join(fields[i+1:], ",") - break + break outer } } } } -func logNoSliceEnc(t1, t2 reflect.Type) { - fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) -} - var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() -// Initialize the fields for encoding and decoding. 
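Aside (illustration only, not part of the vendored files): one subtle behavioral fix in the Properties.Parse hunk above is the new outer: label. The old plain break sat inside a switch inside the for loop, so it only left the switch; break outer leaves the loop once the comma-joined default value has been consumed. A small sketch with made-up tag fields:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical tag fields after the wire type and number, as Parse would see them.
	fields := []string{"bytes", "49", "opt", "def=hello", "world"}

outer:
	for i := 2; i < len(fields); i++ {
		switch f := fields[i]; {
		case f == "opt":
			fmt.Println("optional")
		case strings.HasPrefix(f, "def="):
			def := strings.TrimPrefix(f, "def=") + "," + strings.Join(fields[i+1:], ",")
			fmt.Println("default:", def)
			// A plain break here would only exit the switch and the loop would
			// keep scanning; break outer exits the loop, mirroring the patched Parse.
			break outer
		}
	}
}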
-func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - p.enc = nil - p.dec = nil - p.size = nil - +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { switch t1 := typ; t1.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) - - // proto3 scalar types - - case reflect.Bool: - p.enc = (*Buffer).enc_proto3_bool - p.dec = (*Buffer).dec_proto3_bool - p.size = size_proto3_bool - case reflect.Int32: - p.enc = (*Buffer).enc_proto3_int32 - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_proto3_uint32 - p.dec = (*Buffer).dec_proto3_int32 // can reuse - p.size = size_proto3_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_proto3_int64 - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int32 - p.size = size_proto3_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits - p.dec = (*Buffer).dec_proto3_int64 - p.size = size_proto3_int64 - case reflect.String: - p.enc = (*Buffer).enc_proto3_string - p.dec = (*Buffer).dec_proto3_string - p.size = size_proto3_string - case reflect.Ptr: - switch t2 := t1.Elem(); t2.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) - break - case reflect.Bool: - p.enc = (*Buffer).enc_bool - p.dec = (*Buffer).dec_bool - p.size = size_bool - case reflect.Int32: - p.enc = (*Buffer).enc_int32 - p.dec = (*Buffer).dec_int32 - p.size = size_int32 - case reflect.Uint32: - p.enc = (*Buffer).enc_uint32 - p.dec = (*Buffer).dec_int32 // can reuse - p.size = size_uint32 - case reflect.Int64, reflect.Uint64: - p.enc = (*Buffer).enc_int64 - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.Float32: - p.enc = (*Buffer).enc_uint32 // can just treat them as bits - p.dec = (*Buffer).dec_int32 - p.size = size_uint32 - case reflect.Float64: - p.enc = (*Buffer).enc_int64 // can just treat them as bits - p.dec = (*Buffer).dec_int64 - p.size = size_int64 - case reflect.String: - p.enc = (*Buffer).enc_string - p.dec = (*Buffer).dec_string - p.size = size_string - case reflect.Struct: + if t1.Elem().Kind() == reflect.Struct { p.stype = t1.Elem() - p.isMarshaler = isMarshaler(t1) - p.isUnmarshaler = isUnmarshaler(t1) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_struct_message - p.dec = (*Buffer).dec_struct_message - p.size = size_struct_message - } else { - p.enc = (*Buffer).enc_struct_group - p.dec = (*Buffer).dec_struct_group - p.size = size_struct_group - } } case reflect.Slice: - switch t2 := t1.Elem(); t2.Kind() { - default: - logNoSliceEnc(t1, t2) - break - case reflect.Bool: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_bool - p.size = size_slice_packed_bool - } else { - p.enc = (*Buffer).enc_slice_bool - p.size = size_slice_bool - } - p.dec = (*Buffer).dec_slice_bool - p.packedDec = (*Buffer).dec_slice_packed_bool - case reflect.Int32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int32 - p.size = size_slice_packed_int32 - } else { - p.enc = (*Buffer).enc_slice_int32 - p.size = size_slice_int32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Uint32: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - 
p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case reflect.Int64, reflect.Uint64: - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - case reflect.Uint8: - p.dec = (*Buffer).dec_slice_byte - if p.proto3 { - p.enc = (*Buffer).enc_proto3_slice_byte - p.size = size_proto3_slice_byte - } else { - p.enc = (*Buffer).enc_slice_byte - p.size = size_slice_byte - } - case reflect.Float32, reflect.Float64: - switch t2.Bits() { - case 32: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_uint32 - p.size = size_slice_packed_uint32 - } else { - p.enc = (*Buffer).enc_slice_uint32 - p.size = size_slice_uint32 - } - p.dec = (*Buffer).dec_slice_int32 - p.packedDec = (*Buffer).dec_slice_packed_int32 - case 64: - // can just treat them as bits - if p.Packed { - p.enc = (*Buffer).enc_slice_packed_int64 - p.size = size_slice_packed_int64 - } else { - p.enc = (*Buffer).enc_slice_int64 - p.size = size_slice_int64 - } - p.dec = (*Buffer).dec_slice_int64 - p.packedDec = (*Buffer).dec_slice_packed_int64 - default: - logNoSliceEnc(t1, t2) - break - } - case reflect.String: - p.enc = (*Buffer).enc_slice_string - p.dec = (*Buffer).dec_slice_string - p.size = size_slice_string - case reflect.Ptr: - switch t3 := t2.Elem(); t3.Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) - break - case reflect.Struct: - p.stype = t2.Elem() - p.isMarshaler = isMarshaler(t2) - p.isUnmarshaler = isUnmarshaler(t2) - if p.Wire == "bytes" { - p.enc = (*Buffer).enc_slice_struct_message - p.dec = (*Buffer).dec_slice_struct_message - p.size = size_slice_struct_message - } else { - p.enc = (*Buffer).enc_slice_struct_group - p.dec = (*Buffer).dec_slice_struct_group - p.size = size_slice_struct_group - } - } - case reflect.Slice: - switch t2.Elem().Kind() { - default: - fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) - break - case reflect.Uint8: - p.enc = (*Buffer).enc_slice_slice_byte - p.dec = (*Buffer).dec_slice_slice_byte - p.size = size_slice_slice_byte - } + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() } case reflect.Map: - p.enc = (*Buffer).enc_new_map - p.dec = (*Buffer).dec_new_map - p.size = size_new_map - p.mtype = t1 p.mkeyprop = &Properties{} p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) @@ -562,20 +287,6 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } - // precalculate tag code - wire := p.WireType - if p.Packed { - wire = WireBytes - } - x := uint32(p.Tag)<<3 | uint32(wire) - i := 0 - for i = 0; x > 127; i++ { - p.tagbuf[i] = 0x80 | uint8(x&0x7F) - x >>= 7 - } - p.tagbuf[i] = uint8(x) - p.tagcode = p.tagbuf[0 : i+1] - if p.stype != nil { if lockGetProp { p.sprop = GetProperties(p.stype) @@ -586,32 +297,9 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock } var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() - unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + 
marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() ) -// isMarshaler reports whether type t implements Marshaler. -func isMarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isMarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isMarshaler") - } - return t.Implements(marshalerType) -} - -// isUnmarshaler reports whether type t implements Unmarshaler. -func isUnmarshaler(t reflect.Type) bool { - // We're checking for (likely) pointer-receiver methods - // so if t is not a pointer, something is very wrong. - // The calls above only invoke isUnmarshaler on pointer types. - if t.Kind() != reflect.Ptr { - panic("proto: misuse of isUnmarshaler") - } - return t.Implements(unmarshalerType) -} - // Init populates the properties from a protocol buffer struct tag. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { p.init(typ, name, tag, f, true) @@ -621,14 +309,11 @@ func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructF // "bytes,49,opt,def=hello!" p.Name = name p.OrigName = name - if f != nil { - p.field = toField(f) - } if tag == "" { return } p.Parse(tag) - p.setEncAndDec(typ, f, lockGetProp) + p.setFieldProps(typ, f, lockGetProp) } var ( @@ -678,9 +363,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { propertiesMap[t] = prop // build properties - prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || - reflect.PtrTo(t).Implements(extendableProtoV1Type) - prop.unrecField = invalidField prop.Prop = make([]*Properties, t.NumField()) prop.order = make([]int, t.NumField()) @@ -690,17 +372,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { name := f.Name p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) - if f.Name == "XXX_InternalExtensions" { // special case - p.enc = (*Buffer).enc_exts - p.dec = nil // not needed - p.size = size_exts - } else if f.Name == "XXX_extensions" { // special case - p.enc = (*Buffer).enc_map - p.dec = nil // not needed - p.size = size_map - } else if f.Name == "XXX_unrecognized" { // special case - prop.unrecField = toField(&f) - } oneof := f.Tag.Get("protobuf_oneof") // special case if oneof != "" { // Oneof fields don't use the traditional protobuf tag. @@ -715,9 +386,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } print("\n") } - if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { - fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") - } } // Re-order prop.order. @@ -728,8 +396,7 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { } if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { var oots []interface{} - prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() - prop.stype = t + _, _, _, oots = om.XXX_OneofFuncs() // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) @@ -779,30 +446,6 @@ func getPropertiesLocked(t reflect.Type) *StructProperties { return prop } -// Return the Properties object for the x[0]'th field of the structure. 
-func propByIndex(t reflect.Type, x []int) *Properties { - if len(x) != 1 { - fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) - return nil - } - prop := GetProperties(t) - return prop.Prop[x[0]] -} - -// Get the address and type of a pointer to a struct from an interface. -func getbase(pb Message) (t reflect.Type, b structPointer, err error) { - if pb == nil { - err = ErrNil - return - } - // get the reflect type of the pointer to the struct. - t = reflect.TypeOf(pb) - // get the address of the struct. - value := reflect.ValueOf(pb) - b = toStructPointer(value) - return -} - // A global registry of enum types. // The generated code will register the generated maps by calling RegisterEnum. @@ -826,20 +469,42 @@ func EnumValueMap(enumType string) map[string]int32 { // A registry of all linked message types. // The string is a fully-qualified proto name ("pkg.Message"). var ( - protoTypes = make(map[string]reflect.Type) - revProtoTypes = make(map[reflect.Type]string) + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) ) // RegisterType is called from generated code and maps from the fully qualified // proto name to the type (pointer to struct) of the protocol buffer. func RegisterType(x Message, name string) { - if _, ok := protoTypes[name]; ok { + if _, ok := protoTypedNils[name]; ok { // TODO: Some day, make this a panic. log.Printf("proto: duplicate proto type registered: %s", name) return } t := reflect.TypeOf(x) - protoTypes[name] = t + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t revProtoTypes[t] = name } @@ -855,7 +520,14 @@ func MessageName(x Message) string { } // MessageType returns the message type (pointer to struct) for a named message. -func MessageType(name string) reflect.Type { return protoTypes[name] } +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} // A registry of all linked proto files. var ( diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 00000000000..0f212b3029d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2681 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. 
+type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errreq error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required && errreq == nil { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + errreq = &RequiredNotSetError{f.name} + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errreq +} + +// computeMarshalInfo initializes the marshal info. 
+func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed 
{ + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. 
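A minimal standalone sketch (outside the vendored package, not part of this diff) of the tag packing that typeMarshaler's callers rely on: setTag and getExtElemInfo above store tag<<3|wiretype as a single precomputed varint key, plus its encoded size. The field tag "varint,3,opt,name=id" and the sizeVarint helper below are hypothetical illustrations; the helper just mirrors the 7-bits-per-byte varint length rule, and wire type 0 is the proto varint wire type.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// sizeVarint returns how many bytes a base-128 varint encoding of x takes:
// one byte per 7 bits of payload.
func sizeVarint(x uint64) int {
	n := 1
	for x >= 1<<7 {
		x >>= 7
		n++
	}
	return n
}

func main() {
	// Hypothetical generated struct tag: encoding, field number, cardinality, name.
	tags := strings.Split("varint,3,opt,name=id", ",")
	fieldNum, err := strconv.Atoi(tags[1])
	if err != nil {
		panic(err)
	}
	const wireVarint = 0 // wire type 0 = varint

	// The precomputed key appended before every occurrence of the field,
	// and the number of bytes that key occupies on the wire.
	wiretag := uint64(fieldNum)<<3 | wireVarint
	tagsize := sizeVarint(uint64(fieldNum) << 3)
	fmt.Println(wiretag, tagsize) // 24 1
}

Precomputing this pair once per field is what lets the per-type sizers below charge a flat "tagsize" per element instead of re-deriving the key on every call.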
+ +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, 
tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func 
sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func 
sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} 
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) 
{ + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = 
appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr 
pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func 
appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. 
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). +func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. 
+// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 00000000000..5525def6a5d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. 
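// A minimal usage sketch (not part of the vendored file) for the Size/Marshal/Buffer.Marshal
// entry points above: a hand-rolled type that satisfies the legacy Marshaler interface takes
// the "let the message marshal itself" compatibility branch. rawMsg and its payload are
// invented here purely for illustration.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

// rawMsg implements proto.Message plus the legacy Marshaler interface.
type rawMsg struct{ payload []byte }

func (m *rawMsg) Reset()                   { m.payload = nil }
func (m *rawMsg) String() string           { return fmt.Sprintf("rawMsg(%x)", m.payload) }
func (m *rawMsg) ProtoMessage()            {}
func (m *rawMsg) Marshal() ([]byte, error) { return m.payload, nil }

func main() {
	m := &rawMsg{payload: []byte{0x08, 0x96, 0x01}} // field 1, varint 150
	b, err := proto.Marshal(m)
	fmt.Println(b, err, proto.Size(m)) // [8 150 1] <nil> 3

	// Buffer.Marshal appends to an existing buffer instead of allocating a fresh one.
	buf := proto.NewBuffer(nil)
	if err := buf.Marshal(m); err == nil {
		fmt.Println(buf.Bytes())
	}
}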
+func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
+ } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) + } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? 
+ mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000000..55f0340a3fd --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,1967 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. 
+// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. + err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + rnse = r + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + panic("no extensions field available") + } + } + + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if rnse != nil { + // A required field of a submessage/group is missing. Return that error. + return rnse + } + if reqMask != u.reqMask { + // A required field of this message is missing. 
+ for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + if fn.IsValid() { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tagstr := strings.Split(f.Tag.Get("protobuf"), ",")[1] + tag, err := strconv.Atoi(tagstr) + if err != nil { + panic("protobuf tag field not an integer: " + tagstr) + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(tag, of.field, unmarshal, 0) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0) + + // Set mask for required field check. + u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1 + + atomic.StoreInt32(&u.initialized, 1) +} + +// setTag stores the unmarshal information for the given tag. +// tag = tag # for field +// field/unmarshal = unmarshal info for that field. +// reqMask = if required, bitmask for field position in required field list. 0 otherwise. +func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64) { + i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask} + n := u.typ.NumField() + if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. +func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + // We'll never have both pointer and slice for basic types.
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
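// A small stand-alone sketch (not part of the vendored file): the sint32/sint64 unmarshalers
// below recover signed values with the expression int64(x>>1) ^ int64(x)<<63>>63, i.e. zigzag
// decoding. The encoder/decoder pair and the demo values here are for illustration only.
package main

import "fmt"

// encodeZigZag64 maps signed to unsigned so that small negative numbers stay small:
// 0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...
func encodeZigZag64(v int64) uint64 { return uint64(v<<1) ^ uint64(v>>63) }

// decodeZigZag64 is the inverse, written with the same bit trick as the unmarshalers below.
func decodeZigZag64(u uint64) int64 { return int64(u>>1) ^ int64(u)<<63>>63 }

func main() {
	for _, v := range []int64{0, -1, 1, -2, 150, -150} {
		u := encodeZigZag64(v)
		fmt.Printf("%6d -> %3d -> %6d\n", v, u, decodeZigZag64(u))
	}
}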
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) 
+ s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. + if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. 
Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. 
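
As background for the map-decoding loop above: on the wire a protobuf map field is just a repeated pseudo-message whose field 1 carries the key and field 2 carries the value, which is exactly what the switch on x >> 3 is dispatching on. A small hand-rolled illustration follows; it is not the vendored code, and the byte values are assembled by hand for a hypothetical map<string,int32> entry with key "hi" and value 7.

package main

import "fmt"

// Walk the tag bytes of one hand-assembled map entry and print which
// field number and wire type each tag encodes. This mirrors the shape of
// the data that the vendored map unmarshaler iterates over.
func main() {
	entry := []byte{
		0x0a, 0x02, 'h', 'i', // tag: field 1 (key), wire type 2 (bytes); len 2; "hi"
		0x10, 0x07, // tag: field 2 (value), wire type 0 (varint); 7
	}
	for i := 0; i < len(entry); {
		tag := entry[i]
		fmt.Printf("field %d, wire type %d\n", tag>>3, tag&7)
		// Skip the payload. This is deliberately crude and only handles
		// the two shapes used in this example.
		if tag&7 == 2 {
			i += 2 + int(entry[i+1])
		} else {
			i += 2
		}
	}
	// Output:
	// field 1, wire type 2
	// field 2, wire type 0
}
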
+func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. +func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go index 965876bf033..2205fdaadf8 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -50,7 +50,6 @@ import ( var ( newline = []byte("\n") spaces = []byte(" ") - gtNewline = []byte(">\n") endBraceNewline = []byte("}\n") backslashN = []byte{'\\', 'n'} backslashR = []byte{'\\', 'r'} @@ -170,11 +169,6 @@ func writeName(w *textWriter, props *Properties) error { return nil } -// raw is the interface satisfied by RawMessage. -type raw interface { - Bytes() []byte -} - func requiresQuotes(u string) bool { // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. 
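
For readers skimming this hunk: the unrolled decodeVarint added above is a hand-optimized form of standard base-128 varint decoding (7 payload bits per byte, least-significant group first, high bit set on every byte except the last, at most 10 bytes for a uint64). The following is a minimal, unoptimized sketch of the same idea, written here only for illustration; decodeVarintSketch is not part of the vendored change and omits the strict overflow check that the real code performs on the tenth byte.

package main

import "fmt"

// decodeVarintSketch returns the decoded value and the number of bytes
// consumed, or (0, 0) if the input is truncated or runs past the ten-byte
// maximum for a uint64 varint.
func decodeVarintSketch(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << uint(7*i)
		if b[i] < 0x80 { // high bit clear: last byte of the varint
			return x, i + 1
		}
	}
	return 0, 0
}

func main() {
	// 300 is encoded on the wire as 0xAC 0x02.
	fmt.Println(decodeVarintSketch([]byte{0xac, 0x02})) // prints: 300 2
}

The vendored version presumably unrolls the loop to keep shifts constant and avoid loop overhead on the decode hot path, which is why it reads as ten nearly identical blocks.
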
for _, ch := range u { @@ -269,6 +263,10 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { props := sprops.Prop[i] name := st.Field(i).Name + if name == "XXX_NoUnkeyedLiteral" { + continue + } + if strings.HasPrefix(name, "XXX_") { // There are two XXX_ fields: // XXX_unrecognized []byte @@ -436,12 +434,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if b, ok := fv.Interface().(raw); ok { - if err := writeRaw(w, b.Bytes()); err != nil { - return err - } - continue - } // Enums have a String method, so writeAny will work fine. if err := tm.writeAny(w, fv, props); err != nil { @@ -455,7 +447,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // Extensions (the XXX_extensions field). pv := sv.Addr() - if _, ok := extendable(pv.Interface()); ok { + if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err } @@ -464,27 +456,6 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } -// writeRaw writes an uninterpreted raw message. -func writeRaw(w *textWriter, b []byte) error { - if err := w.WriteByte('<'); err != nil { - return err - } - if !w.compact { - if err := w.WriteByte('\n'); err != nil { - return err - } - } - w.indent() - if err := writeUnknownStruct(w, b); err != nil { - return err - } - w.unindent() - if err := w.WriteByte('>'); err != nil { - return err - } - return nil -} - // writeAny writes an arbitrary field. func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) @@ -535,6 +506,19 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert } } w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } if etm, ok := v.Interface().(encoding.TextMarshaler); ok { text, err := etm.MarshalText() if err != nil { @@ -543,8 +527,13 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert if _, err = w.Write(text); err != nil { return err } - } else if err := tm.writeStruct(w, v); err != nil { - return err + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } } w.unindent() if err := w.WriteByte(ket); err != nil { diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go index 5e14513f28c..0685bae36d5 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -206,7 +206,6 @@ func (p *textParser) advance() { var ( errBadUTF8 = errors.New("proto: bad UTF-8") - errBadHex = errors.New("proto: bad hexadecimal") ) func unquoteC(s string, quote rune) (string, error) { @@ -277,60 +276,47 @@ func unescape(s string) (ch string, tail string, err error) { return "?", s, nil // trigraph workaround case '\'', '"', '\\': return string(r), s, nil - case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + case '0', '1', '2', '3', '4', '5', '6', '7': if len(s) < 2 { return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) } - base := 8 - ss := s[:2] + ss := string(r) + s[:2] s = s[2:] - if r == 'x' || r == 'X' { - base = 16 - } else { - ss = string(r) + ss - } - i, err := strconv.ParseUint(ss, base, 8) + i, err := strconv.ParseUint(ss, 8, 8) if err != nil { - return "", "", err + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) } return string([]byte{byte(i)}), s, nil - case 'u', 'U': - n := 4 - if r == 'U' { + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': n = 8 } if len(s) < n { - return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) - } - - bs := make([]byte, n/2) - for i := 0; i < n; i += 2 { - a, ok1 := unhex(s[i]) - b, ok2 := unhex(s[i+1]) - if !ok1 || !ok2 { - return "", "", errBadHex - } - bs[i/2] = a<<4 | b + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) } + ss := s[:n] s = s[n:] - return string(bs), s, nil + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil } return "", "", fmt.Errorf(`unknown escape \%c`, r) } -// Adapted from src/pkg/strconv/quote.go. -func unhex(b byte) (v byte, ok bool) { - switch { - case '0' <= b && b <= '9': - return b - '0', true - case 'a' <= b && b <= 'f': - return b - 'a' + 10, true - case 'A' <= b && b <= 'F': - return b - 'A' + 10, true - } - return 0, false -} - // Back off the parser by one token. Can only be done between calls to next(). // It makes the next advance() a no-op. 
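
One behavioral detail of the rewritten unescape above that is easy to miss: \xNN (and \XNN) escapes now share the same strconv.ParseUint path as \u and \U, but a hex byte escape still yields a single raw byte, whereas \u and \U yield the UTF-8 encoding of the code point. The snippet below is a standalone illustration of that distinction using only the standard library; it is not the vendored function.

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Both escape forms parse their digits the same way...
	i, err := strconv.ParseUint("e9", 16, 64)
	if err != nil {
		panic(err)
	}
	// ...but \xe9 becomes one raw byte, while \u00e9 becomes the UTF-8
	// encoding of U+00E9 (two bytes), matching the two return paths in
	// the diff above.
	asByte := string([]byte{byte(i)})
	asRune := string(rune(i))
	fmt.Printf("%q (len %d) vs %q (len %d)\n", asByte, len(asByte), asRune, len(asRune))
	// Output: "\xe9" (len 1) vs "é" (len 2)
}
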
func (p *textParser) back() { p.backed = true } @@ -728,6 +714,9 @@ func (p *textParser) consumeExtName() (string, error) { if tok.err != nil { return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } } return strings.Join(parts, ""), nil } @@ -865,7 +854,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(x) + fv.SetUint(uint64(x)) return nil } case reflect.Uint64: @@ -883,13 +872,9 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { // UnmarshalText returns *RequiredNotSetError. func UnmarshalText(s string, pb Message) error { if um, ok := pb.(encoding.TextUnmarshaler); ok { - err := um.UnmarshalText([]byte(s)) - return err + return um.UnmarshalText([]byte(s)) } pb.Reset() v := reflect.ValueOf(pb) - if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { - return pe - } - return nil + return newTextParser(s).readStruct(v.Elem(), "") } diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f34601723de..f67edc7dc2b 100644 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/any.proto -/* -Package any is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/any.proto - -It has these top-level messages: - Any -*/ -package any +package any // import "github.com/golang/protobuf/ptypes/any" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -132,14 +123,36 @@ type Any struct { // TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl" json:"type_url,omitempty"` // Must be a valid serialized protocol buffer of the above specified type. 
- Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { + return fileDescriptor_any_744b9ca530f228db, []int{0} +} +func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Any.Unmarshal(m, b) +} +func (m *Any) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Any.Marshal(b, m, deterministic) +} +func (dst *Any) XXX_Merge(src proto.Message) { + xxx_messageInfo_Any.Merge(dst, src) +} +func (m *Any) XXX_Size() int { + return xxx_messageInfo_Any.Size(m) +} +func (m *Any) XXX_DiscardUnknown() { + xxx_messageInfo_Any.DiscardUnknown(m) } -func (m *Any) Reset() { *m = Any{} } -func (m *Any) String() string { return proto.CompactTextString(m) } -func (*Any) ProtoMessage() {} -func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Any) XXX_WellKnownType() string { return "Any" } +var xxx_messageInfo_Any proto.InternalMessageInfo func (m *Any) GetTypeUrl() string { if m != nil { @@ -159,9 +172,9 @@ func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor_any_744b9ca530f228db) } -var fileDescriptor0 = []byte{ +var fileDescriptor_any_744b9ca530f228db = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index b2410a098eb..4d75473b8b2 100644 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/duration.proto -/* -Package duration is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/duration.proto - -It has these top-level messages: - Duration -*/ -package duration +package duration // import "github.com/golang/protobuf/ptypes/duration" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -98,14 +89,36 @@ type Duration struct { // of one second or more, a non-zero value for the `nanos` field must be // of the same sign as the `seconds` field. Must be from -999,999,999 // to +999,999,999 inclusive. 
- Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_duration_e7d612259e3f0613, []int{0} +} +func (*Duration) XXX_WellKnownType() string { return "Duration" } +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo func (m *Duration) GetSeconds() int64 { if m != nil { @@ -125,9 +138,11 @@ func init() { proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } -func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } +func init() { + proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor_duration_e7d612259e3f0613) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_duration_e7d612259e3f0613 = []byte{ // 190 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, diff --git a/vendor/github.com/golang/protobuf/ptypes/regen.sh b/vendor/github.com/golang/protobuf/ptypes/regen.sh deleted file mode 100755 index b50a9414ac2..00000000000 --- a/vendor/github.com/golang/protobuf/ptypes/regen.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/bin/bash -e -# -# This script fetches and rebuilds the "well-known types" protocol buffers. -# To run this you will need protoc and goprotobuf installed; -# see https://github.com/golang/protobuf for instructions. -# You also need Go and Git installed. - -PKG=github.com/golang/protobuf/ptypes -UPSTREAM=https://github.com/google/protobuf -UPSTREAM_SUBDIR=src/google/protobuf -PROTO_FILES=(any duration empty struct timestamp wrappers) - -function die() { - echo 1>&2 $* - exit 1 -} - -# Sanity check that the right tools are accessible. -for tool in go git protoc protoc-gen-go; do - q=$(which $tool) || die "didn't find $tool" - echo 1>&2 "$tool: $q" -done - -tmpdir=$(mktemp -d -t regen-wkt.XXXXXX) -trap 'rm -rf $tmpdir' EXIT - -echo -n 1>&2 "finding package dir... " -pkgdir=$(go list -f '{{.Dir}}' $PKG) -echo 1>&2 $pkgdir -base=$(echo $pkgdir | sed "s,/$PKG\$,,") -echo 1>&2 "base: $base" -cd "$base" - -echo 1>&2 "fetching latest protos... " -git clone -q $UPSTREAM $tmpdir - -for file in ${PROTO_FILES[@]}; do - echo 1>&2 "* $file" - protoc --go_out=. 
-I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die - cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file -done - -echo 1>&2 "All OK" diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index e23e4a25daf..e9c22228216 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,16 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // source: google/protobuf/timestamp.proto -/* -Package timestamp is a generated protocol buffer package. - -It is generated from these files: - google/protobuf/timestamp.proto - -It has these top-level messages: - Timestamp -*/ -package timestamp +package timestamp // import "github.com/golang/protobuf/ptypes/timestamp" import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -101,7 +92,7 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) // to obtain a formatter capable of generating timestamps in this format. // // @@ -114,14 +105,36 @@ type Timestamp struct { // second values with fractions must still have non-negative nanos values // that count forward in time. Must be from 0 to 999,999,999 // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } -func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_timestamp_b826e8e5fba671a8, []int{0} +} +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo func (m *Timestamp) GetSeconds() int64 { if m != nil { @@ -141,9 +154,11 @@ func init() { proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } -func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } +func init() { + 
proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor_timestamp_b826e8e5fba671a8) +} -var fileDescriptor0 = []byte{ +var fileDescriptor_timestamp_b826e8e5fba671a8 = []byte{ // 191 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index b7cbd17502f..06750ab1f12 100644 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -114,7 +114,7 @@ option objc_class_prefix = "GPB"; // to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) // with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one // can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime--) // to obtain a formatter capable of generating timestamps in this format. // // diff --git a/vendor/github.com/google/go-jsonnet/ast/clone.go b/vendor/github.com/google/go-jsonnet/ast/clone.go index faa76f04f35..84ca8611ece 100644 --- a/vendor/github.com/google/go-jsonnet/ast/clone.go +++ b/vendor/github.com/google/go-jsonnet/ast/clone.go @@ -25,9 +25,11 @@ import ( func cloneForSpec(specPtr *ForSpec) { clone(&specPtr.Expr) oldOuter := specPtr.Outer - specPtr.Outer = new(ForSpec) - *specPtr.Outer = *oldOuter - cloneForSpec(specPtr.Outer) + if oldOuter != nil { + specPtr.Outer = new(ForSpec) + *specPtr.Outer = *oldOuter + cloneForSpec(specPtr.Outer) + } for i := range specPtr.Conditions { clone(&specPtr.Conditions[i].Expr) } diff --git a/vendor/github.com/google/go-jsonnet/ast/stdast.go b/vendor/github.com/google/go-jsonnet/ast/stdast.go index 3433a965086..b0b23d6cf8f 100644 --- a/vendor/github.com/google/go-jsonnet/ast/stdast.go +++ b/vendor/github.com/google/go-jsonnet/ast/stdast.go @@ -11,1836 +11,3440 @@ var p3Var = "$" var p3 = &p3Var var p9Var = "object " var p9 = &p9Var -var p43Var = "object " -var p43 = &p43Var -var p54Var = "thunk from >" +var p11Var = "object " +var p11 = &p11Var +var p35Var = "object " +var p35 = &p35Var +var p39Var = "object " +var p39 = &p39Var +var p42Var = "object " +var p42 = &p42Var +var p45Var = "object " +var p45 = &p45Var +var p48Var = "object " +var p48 = &p48Var +var p51Var = "object " +var p51 = &p51Var +var p54Var = "object " var p54 = &p54Var -var p69Var = "function " -var p69 = &p69Var -var p78Var = "thunk from >" -var p78 = &p78Var -var p95Var = "function " -var p95 = &p95Var -var p104Var = "thunk from >" -var p104 = &p104Var -var p121Var = "function " -var p121 = &p121Var -var p130Var = "thunk from >" -var p130 = &p130Var -var p147Var = "function " -var p147 = &p147Var -var p156Var = "thunk from >" -var p156 = &p156Var -var p173Var = "function " -var p173 = &p173Var -var p182Var = "thunk from >" -var p182 = &p182Var -var p199Var = "function " -var p199 = &p199Var -var p208Var = "thunk from >" -var p208 = &p208Var -var p217Var = "function " +var p57Var = "object " +var p57 = &p57Var +var p62Var = "thunk from >" +var p62 = &p62Var +var p64Var = "thunk from >" +var p64 = &p64Var +var p68Var = "object " +var p68 = &p68Var +var 
p80Var = "function " +var p80 = &p80Var +var p89Var = "thunk from >" +var p89 = &p89Var +var p96Var = "object " +var p96 = &p96Var +var p98Var = "object " +var p98 = &p98Var +var p122Var = "object " +var p122 = &p122Var +var p126Var = "object " +var p126 = &p126Var +var p129Var = "object " +var p129 = &p129Var +var p132Var = "object " +var p132 = &p132Var +var p135Var = "object " +var p135 = &p135Var +var p138Var = "object " +var p138 = &p138Var +var p141Var = "object " +var p141 = &p141Var +var p144Var = "object " +var p144 = &p144Var +var p149Var = "thunk from >" +var p149 = &p149Var +var p151Var = "thunk from >" +var p151 = &p151Var +var p165Var = "function " +var p165 = &p165Var +var p174Var = "thunk from >" +var p174 = &p174Var +var p181Var = "object " +var p181 = &p181Var +var p183Var = "object " +var p183 = &p183Var +var p207Var = "object " +var p207 = &p207Var +var p211Var = "object " +var p211 = &p211Var +var p214Var = "object " +var p214 = &p214Var +var p217Var = "object " var p217 = &p217Var -var p236Var = "thunk from >" +var p220Var = "object " +var p220 = &p220Var +var p223Var = "object " +var p223 = &p223Var +var p226Var = "object " +var p226 = &p226Var +var p229Var = "object " +var p229 = &p229Var +var p234Var = "thunk from >" +var p234 = &p234Var +var p236Var = "thunk from >" var p236 = &p236Var -var p252Var = "function " -var p252 = &p252Var -var p273Var = "thunk from >" -var p273 = &p273Var -var p290Var = "thunk from >" -var p290 = &p290Var -var p313Var = "thunk from >" -var p313 = &p313Var -var p330Var = "thunk from >" -var p330 = &p330Var -var p353Var = "thunk from >" +var p250Var = "function " +var p250 = &p250Var +var p259Var = "thunk from >" +var p259 = &p259Var +var p266Var = "object " +var p266 = &p266Var +var p268Var = "object " +var p268 = &p268Var +var p292Var = "object " +var p292 = &p292Var +var p296Var = "object " +var p296 = &p296Var +var p299Var = "object " +var p299 = &p299Var +var p302Var = "object " +var p302 = &p302Var +var p305Var = "object " +var p305 = &p305Var +var p308Var = "object " +var p308 = &p308Var +var p311Var = "object " +var p311 = &p311Var +var p314Var = "object " +var p314 = &p314Var +var p319Var = "thunk from >" +var p319 = &p319Var +var p321Var = "thunk from >" +var p321 = &p321Var +var p335Var = "function " +var p335 = &p335Var +var p344Var = "thunk from >" +var p344 = &p344Var +var p351Var = "object " +var p351 = &p351Var +var p353Var = "object " var p353 = &p353Var -var p370Var = "thunk from >" -var p370 = &p370Var -var p395Var = "thunk from >" -var p395 = &p395Var -var p405Var = "thunk from >>" -var p405 = &p405Var -var p411Var = "function " -var p411 = &p411Var -var p427Var = "function " -var p427 = &p427Var -var p440Var = "thunk from >" -var p440 = &p440Var -var p451Var = "thunk from >" -var p451 = &p451Var -var p471Var = "thunk from >" -var p471 = &p471Var -var p483Var = "thunk from >>" -var p483 = &p483Var -var p493Var = "function " -var p493 = &p493Var -var p506Var = "thunk from >" -var p506 = &p506Var -var p517Var = "thunk from >" -var p517 = &p517Var -var p537Var = "thunk from >" -var p537 = &p537Var -var p550Var = "thunk from >>" -var p550 = &p550Var -var p561Var = "thunk from >>" -var p561 = &p561Var -var p572Var = "thunk from >>" -var p572 = &p572Var +var p377Var = "object " +var p377 = &p377Var +var p381Var = "object " +var p381 = &p381Var +var p384Var = "object " +var p384 = &p384Var +var p387Var = "object " +var p387 = &p387Var +var p390Var = "object " +var p390 = &p390Var +var p393Var = "object " +var p393 = &p393Var 
+var p396Var = "object " +var p396 = &p396Var +var p399Var = "object " +var p399 = &p399Var +var p404Var = "thunk from >" +var p404 = &p404Var +var p406Var = "thunk from >" +var p406 = &p406Var +var p420Var = "function " +var p420 = &p420Var +var p429Var = "thunk from >" +var p429 = &p429Var +var p436Var = "object " +var p436 = &p436Var +var p438Var = "object " +var p438 = &p438Var +var p462Var = "object " +var p462 = &p462Var +var p466Var = "object " +var p466 = &p466Var +var p469Var = "object " +var p469 = &p469Var +var p472Var = "object " +var p472 = &p472Var +var p475Var = "object " +var p475 = &p475Var +var p478Var = "object " +var p478 = &p478Var +var p481Var = "object " +var p481 = &p481Var +var p484Var = "object " +var p484 = &p484Var +var p489Var = "thunk from >" +var p489 = &p489Var +var p491Var = "thunk from >" +var p491 = &p491Var +var p505Var = "function " +var p505 = &p505Var +var p514Var = "thunk from >" +var p514 = &p514Var +var p521Var = "object " +var p521 = &p521Var +var p523Var = "object " +var p523 = &p523Var +var p547Var = "object " +var p547 = &p547Var +var p551Var = "object " +var p551 = &p551Var +var p554Var = "object " +var p554 = &p554Var +var p557Var = "object " +var p557 = &p557Var +var p560Var = "object " +var p560 = &p560Var +var p563Var = "object " +var p563 = &p563Var +var p566Var = "object " +var p566 = &p566Var +var p569Var = "object " +var p569 = &p569Var +var p574Var = "thunk from >" +var p574 = &p574Var +var p576Var = "thunk from >" +var p576 = &p576Var var p582Var = "function " var p582 = &p582Var -var p591Var = "thunk from >" -var p591 = &p591Var -var p600Var = "thunk from >>" -var p600 = &p600Var -var p606Var = "function " -var p606 = &p606Var -var p618Var = "function " -var p618 = &p618Var -var p622Var = "thunk from >" -var p622 = &p622Var -var p625Var = "function " -var p625 = &p625Var -var p654Var = "thunk from >" +var p601Var = "thunk from >" +var p601 = &p601Var +var p615Var = "object " +var p615 = &p615Var +var p617Var = "object " +var p617 = &p617Var +var p641Var = "object " +var p641 = &p641Var +var p645Var = "object " +var p645 = &p645Var +var p648Var = "object " +var p648 = &p648Var +var p651Var = "object " +var p651 = &p651Var +var p654Var = "object " var p654 = &p654Var -var p672Var = "thunk from >" -var p672 = &p672Var -var p683Var = "thunk from >>" -var p683 = &p683Var -var p694Var = "thunk from >>" -var p694 = &p694Var -var p696Var = "function " -var p696 = &p696Var -var p705Var = "thunk from >" -var p705 = &p705Var -var p734Var = "thunk from >" -var p734 = &p734Var -var p750Var = "thunk from >>" -var p750 = &p750Var -var p765Var = "thunk from >" -var p765 = &p765Var -var p773Var = "thunk from >>" -var p773 = &p773Var -var p782Var = "function " -var p782 = &p782Var -var p803Var = "thunk from >" -var p803 = &p803Var -var p820Var = "thunk from >" -var p820 = &p820Var -var p843Var = "thunk from >" -var p843 = &p843Var -var p860Var = "thunk from >" -var p860 = &p860Var -var p883Var = "thunk from >" -var p883 = &p883Var -var p900Var = "thunk from >" -var p900 = &p900Var -var p911Var = "thunk from >" -var p911 = &p911Var -var p923Var = "function " +var p657Var = "object " +var p657 = &p657Var +var p660Var = "object " +var p660 = &p660Var +var p663Var = "object " +var p663 = &p663Var +var p668Var = "thunk from >" +var p668 = &p668Var +var p670Var = "thunk from >" +var p670 = &p670Var +var p676Var = "function " +var p676 = &p676Var +var p697Var = "thunk from >" +var p697 = &p697Var +var p714Var = "thunk from >" +var p714 = &p714Var +var 
p737Var = "thunk from >" +var p737 = &p737Var +var p754Var = "thunk from >" +var p754 = &p754Var +var p777Var = "thunk from >" +var p777 = &p777Var +var p794Var = "thunk from >" +var p794 = &p794Var +var p819Var = "thunk from >" +var p819 = &p819Var +var p829Var = "thunk from >>" +var p829 = &p829Var +var p835Var = "function " +var p835 = &p835Var +var p849Var = "object " +var p849 = &p849Var +var p851Var = "object " +var p851 = &p851Var +var p875Var = "object " +var p875 = &p875Var +var p879Var = "object " +var p879 = &p879Var +var p882Var = "object " +var p882 = &p882Var +var p885Var = "object " +var p885 = &p885Var +var p888Var = "object " +var p888 = &p888Var +var p891Var = "object " +var p891 = &p891Var +var p894Var = "object " +var p894 = &p894Var +var p897Var = "object " +var p897 = &p897Var +var p902Var = "thunk from >" +var p902 = &p902Var +var p904Var = "thunk from >" +var p904 = &p904Var +var p910Var = "function " +var p910 = &p910Var +var p923Var = "thunk from >" var p923 = &p923Var -var p944Var = "thunk from >" -var p944 = &p944Var -var p961Var = "thunk from >" -var p961 = &p961Var -var p984Var = "thunk from >" -var p984 = &p984Var -var p1001Var = "thunk from >" -var p1001 = &p1001Var -var p1024Var = "thunk from >" -var p1024 = &p1024Var -var p1041Var = "thunk from >" -var p1041 = &p1041Var -var p1064Var = "thunk from >" -var p1064 = &p1064Var -var p1081Var = "thunk from >" -var p1081 = &p1081Var -var p1087Var = "thunk from >" -var p1087 = &p1087Var -var p1091Var = "function " -var p1091 = &p1091Var -var p1095Var = "thunk from >" -var p1095 = &p1095Var -var p1105Var = "thunk from >" -var p1105 = &p1105Var -var p1125Var = "thunk from >" -var p1125 = &p1125Var -var p1135Var = "thunk from >" -var p1135 = &p1135Var -var p1178Var = "thunk from >" -var p1178 = &p1178Var -var p1188Var = "thunk from >" -var p1188 = &p1188Var -var p1202Var = "thunk from >>" -var p1202 = &p1202Var -var p1211Var = "thunk from >" -var p1211 = &p1211Var -var p1231Var = "thunk from >" -var p1231 = &p1231Var -var p1254Var = "function " -var p1254 = &p1254Var -var p1263Var = "thunk from >" -var p1263 = &p1263Var -var p1285Var = "thunk from >" +var p934Var = "thunk from >" +var p934 = &p934Var +var p954Var = "thunk from >" +var p954 = &p954Var +var p966Var = "thunk from >>" +var p966 = &p966Var +var p974Var = "object " +var p974 = &p974Var +var p976Var = "object " +var p976 = &p976Var +var p1000Var = "object " +var p1000 = &p1000Var +var p1004Var = "object " +var p1004 = &p1004Var +var p1007Var = "object " +var p1007 = &p1007Var +var p1010Var = "object " +var p1010 = &p1010Var +var p1013Var = "object " +var p1013 = &p1013Var +var p1016Var = "object " +var p1016 = &p1016Var +var p1019Var = "object " +var p1019 = &p1019Var +var p1022Var = "object " +var p1022 = &p1022Var +var p1027Var = "thunk from >" +var p1027 = &p1027Var +var p1029Var = "thunk from >" +var p1029 = &p1029Var +var p1035Var = "function " +var p1035 = &p1035Var +var p1048Var = "thunk from >" +var p1048 = &p1048Var +var p1059Var = "thunk from >" +var p1059 = &p1059Var +var p1079Var = "thunk from >" +var p1079 = &p1079Var +var p1092Var = "thunk from >>" +var p1092 = &p1092Var +var p1103Var = "thunk from >>" +var p1103 = &p1103Var +var p1114Var = "thunk from >>" +var p1114 = &p1114Var +var p1122Var = "object " +var p1122 = &p1122Var +var p1124Var = "object " +var p1124 = &p1124Var +var p1148Var = "object " +var p1148 = &p1148Var +var p1152Var = "object " +var p1152 = &p1152Var +var p1155Var = "object " +var p1155 = &p1155Var +var p1158Var = "object 
" +var p1158 = &p1158Var +var p1161Var = "object " +var p1161 = &p1161Var +var p1164Var = "object " +var p1164 = &p1164Var +var p1167Var = "object " +var p1167 = &p1167Var +var p1170Var = "object " +var p1170 = &p1170Var +var p1175Var = "thunk from >" +var p1175 = &p1175Var +var p1177Var = "thunk from >" +var p1177 = &p1177Var +var p1183Var = "function " +var p1183 = &p1183Var +var p1192Var = "thunk from >" +var p1192 = &p1192Var +var p1201Var = "thunk from >>" +var p1201 = &p1201Var +var p1207Var = "function " +var p1207 = &p1207Var +var p1217Var = "object " +var p1217 = &p1217Var +var p1219Var = "object " +var p1219 = &p1219Var +var p1243Var = "object " +var p1243 = &p1243Var +var p1247Var = "object " +var p1247 = &p1247Var +var p1250Var = "object " +var p1250 = &p1250Var +var p1253Var = "object " +var p1253 = &p1253Var +var p1256Var = "object " +var p1256 = &p1256Var +var p1259Var = "object " +var p1259 = &p1259Var +var p1262Var = "object " +var p1262 = &p1262Var +var p1265Var = "object " +var p1265 = &p1265Var +var p1270Var = "thunk from >" +var p1270 = &p1270Var +var p1272Var = "thunk from >" +var p1272 = &p1272Var +var p1278Var = "function " +var p1278 = &p1278Var +var p1282Var = "thunk from >" +var p1282 = &p1282Var +var p1285Var = "function " var p1285 = &p1285Var -var p1307Var = "thunk from >" -var p1307 = &p1307Var -var p1329Var = "thunk from >" -var p1329 = &p1329Var -var p1338Var = "thunk from from >>" -var p1338 = &p1338Var -var p1344Var = "thunk from >" -var p1344 = &p1344Var -var p1353Var = "thunk from from >>" -var p1353 = &p1353Var -var p1359Var = "thunk from >" -var p1359 = &p1359Var -var p1379Var = "function " -var p1379 = &p1379Var -var p1396Var = "thunk from >" -var p1396 = &p1396Var -var p1400Var = "function " -var p1400 = &p1400Var -var p1435Var = "thunk from >" -var p1435 = &p1435Var -var p1441Var = "thunk from >" -var p1441 = &p1441Var -var p1454Var = "thunk from from >>" -var p1454 = &p1454Var -var p1462Var = "thunk from >" -var p1462 = &p1462Var -var p1495Var = "thunk from >" +var p1314Var = "thunk from >" +var p1314 = &p1314Var +var p1332Var = "thunk from >" +var p1332 = &p1332Var +var p1343Var = "thunk from >>" +var p1343 = &p1343Var +var p1354Var = "thunk from >>" +var p1354 = &p1354Var +var p1356Var = "function " +var p1356 = &p1356Var +var p1365Var = "thunk from >" +var p1365 = &p1365Var +var p1394Var = "thunk from >" +var p1394 = &p1394Var +var p1410Var = "thunk from >>" +var p1410 = &p1410Var +var p1425Var = "thunk from >" +var p1425 = &p1425Var +var p1433Var = "thunk from >>" +var p1433 = &p1433Var +var p1440Var = "object " +var p1440 = &p1440Var +var p1442Var = "object " +var p1442 = &p1442Var +var p1466Var = "object " +var p1466 = &p1466Var +var p1470Var = "object " +var p1470 = &p1470Var +var p1473Var = "object " +var p1473 = &p1473Var +var p1476Var = "object " +var p1476 = &p1476Var +var p1479Var = "object " +var p1479 = &p1479Var +var p1482Var = "object " +var p1482 = &p1482Var +var p1485Var = "object " +var p1485 = &p1485Var +var p1488Var = "object " +var p1488 = &p1488Var +var p1493Var = "thunk from >" +var p1493 = &p1493Var +var p1495Var = "thunk from >" var p1495 = &p1495Var -var p1526Var = "thunk from >" -var p1526 = &p1526Var -var p1537Var = "thunk from >>" -var p1537 = &p1537Var -var p1547Var = "thunk from >" -var p1547 = &p1547Var -var p1564Var = "function " -var p1564 = &p1564Var -var p1568Var = "thunk from >" -var p1568 = &p1568Var -var p1577Var = "thunk from >" -var p1577 = &p1577Var -var p1581Var = "function " -var p1581 = &p1581Var -var 
p1593Var = "thunk from >" -var p1593 = &p1593Var -var p1604Var = "thunk from >" -var p1604 = &p1604Var -var p1616Var = "thunk from >" -var p1616 = &p1616Var -var p1624Var = "thunk from >>" -var p1624 = &p1624Var -var p1638Var = "thunk from >" -var p1638 = &p1638Var -var p1648Var = "thunk from >>" -var p1648 = &p1648Var -var p1659Var = "thunk from >>>" -var p1659 = &p1659Var -var p1667Var = "function " -var p1667 = &p1667Var -var p1671Var = "thunk from >" -var p1671 = &p1671Var -var p1680Var = "thunk from >" -var p1680 = &p1680Var -var p1684Var = "function " -var p1684 = &p1684Var -var p1696Var = "thunk from >" -var p1696 = &p1696Var -var p1707Var = "thunk from >" -var p1707 = &p1707Var -var p1719Var = "thunk from >" -var p1719 = &p1719Var -var p1727Var = "thunk from >>" -var p1727 = &p1727Var -var p1741Var = "thunk from >" -var p1741 = &p1741Var -var p1751Var = "thunk from >>" -var p1751 = &p1751Var -var p1762Var = "thunk from >>>" +var p1501Var = "function " +var p1501 = &p1501Var +var p1522Var = "thunk from >" +var p1522 = &p1522Var +var p1539Var = "thunk from >" +var p1539 = &p1539Var +var p1562Var = "thunk from >" +var p1562 = &p1562Var +var p1579Var = "thunk from >" +var p1579 = &p1579Var +var p1602Var = "thunk from >" +var p1602 = &p1602Var +var p1619Var = "thunk from >" +var p1619 = &p1619Var +var p1630Var = "thunk from >" +var p1630 = &p1630Var +var p1640Var = "object " +var p1640 = &p1640Var +var p1642Var = "object " +var p1642 = &p1642Var +var p1666Var = "object " +var p1666 = &p1666Var +var p1670Var = "object " +var p1670 = &p1670Var +var p1673Var = "object " +var p1673 = &p1673Var +var p1676Var = "object " +var p1676 = &p1676Var +var p1679Var = "object " +var p1679 = &p1679Var +var p1682Var = "object " +var p1682 = &p1682Var +var p1685Var = "object " +var p1685 = &p1685Var +var p1688Var = "object " +var p1688 = &p1688Var +var p1693Var = "thunk from >" +var p1693 = &p1693Var +var p1695Var = "thunk from >" +var p1695 = &p1695Var +var p1701Var = "function " +var p1701 = &p1701Var +var p1722Var = "thunk from >" +var p1722 = &p1722Var +var p1739Var = "thunk from >" +var p1739 = &p1739Var +var p1762Var = "thunk from >" var p1762 = &p1762Var -var p1770Var = "function " -var p1770 = &p1770Var var p1779Var = "thunk from >" var p1779 = &p1779Var -var p1792Var = "function " -var p1792 = &p1792Var -var p1804Var = "function " -var p1804 = &p1804Var -var p1808Var = "thunk from >" -var p1808 = &p1808Var -var p1813Var = "object " -var p1813 = &p1813Var -var p1855Var = "thunk from >" -var p1855 = &p1855Var -var p1886Var = "thunk from >" -var p1886 = &p1886Var -var p1898Var = "thunk from >" -var p1898 = &p1898Var -var p1945Var = "thunk from >" -var p1945 = &p1945Var +var p1802Var = "thunk from >" +var p1802 = &p1802Var +var p1819Var = "thunk from >" +var p1819 = &p1819Var +var p1842Var = "thunk from >" +var p1842 = &p1842Var +var p1859Var = "thunk from >" +var p1859 = &p1859Var +var p1865Var = "thunk from >" +var p1865 = &p1865Var +var p1869Var = "function " +var p1869 = &p1869Var +var p1873Var = "thunk from >" +var p1873 = &p1873Var +var p1883Var = "thunk from >" +var p1883 = &p1883Var +var p1903Var = "thunk from >" +var p1903 = &p1903Var +var p1913Var = "thunk from >" +var p1913 = &p1913Var +var p1956Var = "thunk from >" +var p1956 = &p1956Var +var p1966Var = "thunk from >" +var p1966 = &p1966Var +var p1980Var = "thunk from >>" +var p1980 = &p1980Var +var p1989Var = "thunk from >" +var p1989 = &p1989Var var p2009Var = "thunk from >" var p2009 = &p2009Var -var p2031Var = "thunk from >" -var 
p2031 = &p2031Var -var p2054Var = "thunk from >" -var p2054 = &p2054Var -var p2060Var = "thunk from >" -var p2060 = &p2060Var -var p2064Var = "function " -var p2064 = &p2064Var -var p2094Var = "thunk from >" -var p2094 = &p2094Var -var p2131Var = "thunk from >>" -var p2131 = &p2131Var -var p2155Var = "thunk from >" -var p2155 = &p2155Var -var p2184Var = "function " -var p2184 = &p2184Var -var p2193Var = "thunk from >" -var p2193 = &p2193Var -var p2202Var = "thunk from >>" -var p2202 = &p2202Var -var p2214Var = "function " -var p2214 = &p2214Var -var p2226Var = "function " -var p2226 = &p2226Var -var p2247Var = "thunk from >" -var p2247 = &p2247Var -var p2267Var = "thunk from >" -var p2267 = &p2267Var -var p2279Var = "thunk from >" -var p2279 = &p2279Var -var p2302Var = "thunk from >" -var p2302 = &p2302Var -var p2314Var = "thunk from >" -var p2314 = &p2314Var -var p2338Var = "thunk from >" -var p2338 = &p2338Var -var p2350Var = "thunk from >" -var p2350 = &p2350Var -var p2359Var = "function " -var p2359 = &p2359Var -var p2380Var = "thunk from >" -var p2380 = &p2380Var -var p2397Var = "thunk from >" -var p2397 = &p2397Var -var p2422Var = "thunk from >" -var p2422 = &p2422Var -var p2444Var = "thunk from >" +var p2020Var = "object " +var p2020 = &p2020Var +var p2022Var = "object " +var p2022 = &p2022Var +var p2046Var = "object " +var p2046 = &p2046Var +var p2050Var = "object " +var p2050 = &p2050Var +var p2053Var = "object " +var p2053 = &p2053Var +var p2056Var = "object " +var p2056 = &p2056Var +var p2059Var = "object " +var p2059 = &p2059Var +var p2062Var = "object " +var p2062 = &p2062Var +var p2065Var = "object " +var p2065 = &p2065Var +var p2068Var = "object " +var p2068 = &p2068Var +var p2073Var = "thunk from >" +var p2073 = &p2073Var +var p2075Var = "thunk from >" +var p2075 = &p2075Var +var p2091Var = "function " +var p2091 = &p2091Var +var p2100Var = "thunk from >" +var p2100 = &p2100Var +var p2122Var = "thunk from >" +var p2122 = &p2122Var +var p2144Var = "thunk from >" +var p2144 = &p2144Var +var p2166Var = "thunk from >" +var p2166 = &p2166Var +var p2175Var = "thunk from from >>" +var p2175 = &p2175Var +var p2181Var = "thunk from >" +var p2181 = &p2181Var +var p2190Var = "thunk from from >>" +var p2190 = &p2190Var +var p2196Var = "thunk from >" +var p2196 = &p2196Var +var p2216Var = "function " +var p2216 = &p2216Var +var p2233Var = "thunk from >" +var p2233 = &p2233Var +var p2237Var = "function " +var p2237 = &p2237Var +var p2272Var = "thunk from >" +var p2272 = &p2272Var +var p2278Var = "thunk from >" +var p2278 = &p2278Var +var p2291Var = "thunk from from >>" +var p2291 = &p2291Var +var p2299Var = "thunk from >" +var p2299 = &p2299Var +var p2332Var = "thunk from >" +var p2332 = &p2332Var +var p2363Var = "thunk from >" +var p2363 = &p2363Var +var p2374Var = "thunk from >>" +var p2374 = &p2374Var +var p2384Var = "thunk from >" +var p2384 = &p2384Var +var p2399Var = "object " +var p2399 = &p2399Var +var p2401Var = "object " +var p2401 = &p2401Var +var p2425Var = "object " +var p2425 = &p2425Var +var p2429Var = "object " +var p2429 = &p2429Var +var p2432Var = "object " +var p2432 = &p2432Var +var p2435Var = "object " +var p2435 = &p2435Var +var p2438Var = "object " +var p2438 = &p2438Var +var p2441Var = "object " +var p2441 = &p2441Var +var p2444Var = "object " var p2444 = &p2444Var -var p2461Var = "thunk from >" -var p2461 = &p2461Var -var p2472Var = "thunk from >" -var p2472 = &p2472Var -var p2481Var = "thunk from >>" -var p2481 = &p2481Var -var p2487Var = "function " -var p2487 
= &p2487Var -var p2493Var = "thunk from >" -var p2493 = &p2493Var -var p2505Var = "function " -var p2505 = &p2505Var -var p2526Var = "thunk from >" -var p2526 = &p2526Var -var p2543Var = "thunk from >" -var p2543 = &p2543Var -var p2568Var = "thunk from >" -var p2568 = &p2568Var -var p2590Var = "thunk from >" -var p2590 = &p2590Var -var p2607Var = "thunk from >" -var p2607 = &p2607Var -var p2618Var = "thunk from >" -var p2618 = &p2618Var -var p2627Var = "thunk from >>" -var p2627 = &p2627Var -var p2633Var = "function " -var p2633 = &p2633Var -var p2639Var = "thunk from >" +var p2447Var = "object " +var p2447 = &p2447Var +var p2452Var = "thunk from >" +var p2452 = &p2452Var +var p2454Var = "thunk from >" +var p2454 = &p2454Var +var p2460Var = "function " +var p2460 = &p2460Var +var p2464Var = "thunk from >" +var p2464 = &p2464Var +var p2473Var = "thunk from >" +var p2473 = &p2473Var +var p2477Var = "function " +var p2477 = &p2477Var +var p2489Var = "thunk from >" +var p2489 = &p2489Var +var p2500Var = "thunk from >" +var p2500 = &p2500Var +var p2512Var = "thunk from >" +var p2512 = &p2512Var +var p2520Var = "thunk from >>" +var p2520 = &p2520Var +var p2534Var = "thunk from >" +var p2534 = &p2534Var +var p2544Var = "thunk from >>" +var p2544 = &p2544Var +var p2555Var = "thunk from >>>" +var p2555 = &p2555Var +var p2561Var = "object " +var p2561 = &p2561Var +var p2563Var = "object " +var p2563 = &p2563Var +var p2587Var = "object " +var p2587 = &p2587Var +var p2591Var = "object " +var p2591 = &p2591Var +var p2594Var = "object " +var p2594 = &p2594Var +var p2597Var = "object " +var p2597 = &p2597Var +var p2600Var = "object " +var p2600 = &p2600Var +var p2603Var = "object " +var p2603 = &p2603Var +var p2606Var = "object " +var p2606 = &p2606Var +var p2609Var = "object " +var p2609 = &p2609Var +var p2614Var = "thunk from >" +var p2614 = &p2614Var +var p2616Var = "thunk from >" +var p2616 = &p2616Var +var p2622Var = "function " +var p2622 = &p2622Var +var p2626Var = "thunk from >" +var p2626 = &p2626Var +var p2635Var = "thunk from >" +var p2635 = &p2635Var +var p2639Var = "function " var p2639 = &p2639Var -var p2653Var = "function " -var p2653 = &p2653Var -var p2674Var = "thunk from >" +var p2651Var = "thunk from >" +var p2651 = &p2651Var +var p2662Var = "thunk from >" +var p2662 = &p2662Var +var p2674Var = "thunk from >" var p2674 = &p2674Var -var p2691Var = "thunk from >" -var p2691 = &p2691Var -var p2714Var = "thunk from >" -var p2714 = &p2714Var -var p2731Var = "thunk from >" -var p2731 = &p2731Var -var p2761Var = "object " -var p2761 = &p2761Var -var p2767Var = "thunk from >" -var p2767 = &p2767Var -var p2784Var = "thunk from >" +var p2682Var = "thunk from >>" +var p2682 = &p2682Var +var p2696Var = "thunk from >" +var p2696 = &p2696Var +var p2706Var = "thunk from >>" +var p2706 = &p2706Var +var p2717Var = "thunk from >>>" +var p2717 = &p2717Var +var p2723Var = "object " +var p2723 = &p2723Var +var p2725Var = "object " +var p2725 = &p2725Var +var p2749Var = "object " +var p2749 = &p2749Var +var p2753Var = "object " +var p2753 = &p2753Var +var p2756Var = "object " +var p2756 = &p2756Var +var p2759Var = "object " +var p2759 = &p2759Var +var p2762Var = "object " +var p2762 = &p2762Var +var p2765Var = "object " +var p2765 = &p2765Var +var p2768Var = "object " +var p2768 = &p2768Var +var p2771Var = "object " +var p2771 = &p2771Var +var p2776Var = "thunk from >" +var p2776 = &p2776Var +var p2778Var = "thunk from >" +var p2778 = &p2778Var +var p2784Var = "function " var p2784 = &p2784Var -var p2792Var 
= "function " -var p2792 = &p2792Var -var p2796Var = "thunk from >" -var p2796 = &p2796Var -var p2800Var = "function " -var p2800 = &p2800Var -var p2815Var = "thunk from >" -var p2815 = &p2815Var -var p2842Var = "thunk from >" +var p2793Var = "thunk from >" +var p2793 = &p2793Var +var p2806Var = "function " +var p2806 = &p2806Var +var p2816Var = "object " +var p2816 = &p2816Var +var p2818Var = "object " +var p2818 = &p2818Var +var p2842Var = "object " var p2842 = &p2842Var -var p2874Var = "thunk from >" -var p2874 = &p2874Var -var p2889Var = "thunk from >" -var p2889 = &p2889Var -var p2906Var = "thunk from >" -var p2906 = &p2906Var -var p2915Var = "thunk from >>" -var p2915 = &p2915Var -var p2928Var = "thunk from >>" +var p2846Var = "object " +var p2846 = &p2846Var +var p2849Var = "object " +var p2849 = &p2849Var +var p2852Var = "object " +var p2852 = &p2852Var +var p2855Var = "object " +var p2855 = &p2855Var +var p2858Var = "object " +var p2858 = &p2858Var +var p2861Var = "object " +var p2861 = &p2861Var +var p2864Var = "object " +var p2864 = &p2864Var +var p2869Var = "thunk from >" +var p2869 = &p2869Var +var p2871Var = "thunk from >" +var p2871 = &p2871Var +var p2877Var = "function " +var p2877 = &p2877Var +var p2881Var = "thunk from >" +var p2881 = &p2881Var +var p2886Var = "object " +var p2886 = &p2886Var +var p2928Var = "thunk from >" var p2928 = &p2928Var -var p2944Var = "thunk from >" -var p2944 = &p2944Var -var p2968Var = "thunk from >" -var p2968 = &p2968Var -var p3011Var = "thunk from >" -var p3011 = &p3011Var -var p3028Var = "thunk from >" -var p3028 = &p3028Var -var p3049Var = "thunk from >" -var p3049 = &p3049Var -var p3058Var = "thunk from >" -var p3058 = &p3058Var +var p2959Var = "thunk from >" +var p2959 = &p2959Var +var p2971Var = "thunk from >" +var p2971 = &p2971Var +var p3018Var = "thunk from >" +var p3018 = &p3018Var var p3082Var = "thunk from >" var p3082 = &p3082Var -var p3091Var = "thunk from >" -var p3091 = &p3091Var -var p3110Var = "thunk from >" -var p3110 = &p3110Var -var p3118Var = "function " -var p3118 = &p3118Var +var p3104Var = "thunk from >" +var p3104 = &p3104Var var p3127Var = "thunk from >" var p3127 = &p3127Var -var p3135Var = "thunk from >>" -var p3135 = &p3135Var -var p3142Var = "function " -var p3142 = &p3142Var -var p3153Var = "thunk from >" -var p3153 = &p3153Var -var p3168Var = "thunk from >" -var p3168 = &p3168Var -var p3179Var = "thunk from >" -var p3179 = &p3179Var -var p3195Var = "thunk from >>" -var p3195 = &p3195Var -var p3204Var = "thunk from >>>" +var p3133Var = "thunk from >" +var p3133 = &p3133Var +var p3137Var = "function " +var p3137 = &p3137Var +var p3167Var = "thunk from >" +var p3167 = &p3167Var +var p3204Var = "thunk from >>" var p3204 = &p3204Var var p3228Var = "thunk from >" var p3228 = &p3228Var -var p3236Var = "function " -var p3236 = &p3236Var -var p3240Var = "thunk from >" -var p3240 = &p3240Var -var p3244Var = "function " -var p3244 = &p3244Var -var p3259Var = "thunk from >" -var p3259 = &p3259Var -var p3267Var = "thunk from >" -var p3267 = &p3267Var -var p3290Var = "thunk from >" -var p3290 = &p3290Var -var p3294Var = "function " +var p3255Var = "object " +var p3255 = &p3255Var +var p3257Var = "object " +var p3257 = &p3257Var +var p3281Var = "object " +var p3281 = &p3281Var +var p3285Var = "object " +var p3285 = &p3285Var +var p3288Var = "object " +var p3288 = &p3288Var +var p3291Var = "object " +var p3291 = &p3291Var +var p3294Var = "object " var p3294 = &p3294Var -var p3309Var = "thunk from >" -var p3309 = &p3309Var 
-var p3317Var = "thunk from >" -var p3317 = &p3317Var -var p3344Var = "thunk from >" -var p3344 = &p3344Var -var p3362Var = "object " -var p3362 = &p3362Var -var p3376Var = "thunk from >" -var p3376 = &p3376Var +var p3297Var = "object " +var p3297 = &p3297Var +var p3300Var = "object " +var p3300 = &p3300Var +var p3303Var = "object " +var p3303 = &p3303Var +var p3308Var = "thunk from >" +var p3308 = &p3308Var +var p3310Var = "thunk from >" +var p3310 = &p3310Var +var p3316Var = "function " +var p3316 = &p3316Var +var p3325Var = "thunk from >" +var p3325 = &p3325Var +var p3334Var = "thunk from >>" +var p3334 = &p3334Var +var p3346Var = "function " +var p3346 = &p3346Var +var p3356Var = "object " +var p3356 = &p3356Var +var p3358Var = "object " +var p3358 = &p3358Var +var p3382Var = "object " +var p3382 = &p3382Var +var p3386Var = "object " +var p3386 = &p3386Var var p3389Var = "object " var p3389 = &p3389Var -var p3397Var = "thunk from >" -var p3397 = &p3397Var -var p3401Var = "function " +var p3392Var = "object " +var p3392 = &p3392Var +var p3395Var = "object " +var p3395 = &p3395Var +var p3398Var = "object " +var p3398 = &p3398Var +var p3401Var = "object " var p3401 = &p3401Var -var p3405Var = "thunk from >" -var p3405 = &p3405Var -var p3409Var = "function " +var p3404Var = "object " +var p3404 = &p3404Var +var p3409Var = "thunk from >" var p3409 = &p3409Var -var p3424Var = "thunk from >" -var p3424 = &p3424Var -var p3432Var = "thunk from >" -var p3432 = &p3432Var -var p3457Var = "thunk from >" -var p3457 = &p3457Var -var p3472Var = "object " -var p3472 = &p3472Var -var p3492Var = "thunk from >" -var p3492 = &p3492Var -var p3507Var = "object " -var p3507 = &p3507Var -var p3527Var = "thunk from >" -var p3527 = &p3527Var -var p3542Var = "object " -var p3542 = &p3542Var -var p3562Var = "thunk from >" -var p3562 = &p3562Var -var p3577Var = "object " -var p3577 = &p3577Var -var p3597Var = "thunk from >" -var p3597 = &p3597Var -var p3612Var = "object " -var p3612 = &p3612Var -var p3618Var = "object " -var p3618 = &p3618Var -var p3629Var = "thunk from >" -var p3629 = &p3629Var -var p3637Var = "object " -var p3637 = &p3637Var -var p3650Var = "thunk from >" -var p3650 = &p3650Var -var p3654Var = "function " -var p3654 = &p3654Var -var p3671Var = "thunk from >" -var p3671 = &p3671Var -var p3693Var = "object " -var p3693 = &p3693Var -var p3704Var = "thunk from >" -var p3704 = &p3704Var -var p3708Var = "function " -var p3708 = &p3708Var -var p3723Var = "thunk from >" -var p3723 = &p3723Var -var p3731Var = "thunk from >" +var p3411Var = "thunk from >" +var p3411 = &p3411Var +var p3417Var = "function " +var p3417 = &p3417Var +var p3438Var = "thunk from >" +var p3438 = &p3438Var +var p3458Var = "thunk from >" +var p3458 = &p3458Var +var p3470Var = "thunk from >" +var p3470 = &p3470Var +var p3493Var = "thunk from >" +var p3493 = &p3493Var +var p3505Var = "thunk from >" +var p3505 = &p3505Var +var p3529Var = "thunk from >" +var p3529 = &p3529Var +var p3541Var = "thunk from >" +var p3541 = &p3541Var +var p3548Var = "object " +var p3548 = &p3548Var +var p3550Var = "object " +var p3550 = &p3550Var +var p3574Var = "object " +var p3574 = &p3574Var +var p3578Var = "object " +var p3578 = &p3578Var +var p3581Var = "object " +var p3581 = &p3581Var +var p3584Var = "object " +var p3584 = &p3584Var +var p3587Var = "object " +var p3587 = &p3587Var +var p3590Var = "object " +var p3590 = &p3590Var +var p3593Var = "object " +var p3593 = &p3593Var +var p3596Var = "object " +var p3596 = &p3596Var +var p3601Var = "thunk from 
>" +var p3601 = &p3601Var +var p3603Var = "thunk from >" +var p3603 = &p3603Var +var p3609Var = "function " +var p3609 = &p3609Var +var p3630Var = "thunk from >" +var p3630 = &p3630Var +var p3647Var = "thunk from >" +var p3647 = &p3647Var +var p3672Var = "thunk from >" +var p3672 = &p3672Var +var p3694Var = "thunk from >" +var p3694 = &p3694Var +var p3711Var = "thunk from >" +var p3711 = &p3711Var +var p3722Var = "thunk from >" +var p3722 = &p3722Var +var p3731Var = "thunk from >>" var p3731 = &p3731Var -var p3756Var = "thunk from >" -var p3756 = &p3756Var -var p3790Var = "thunk from >" -var p3790 = &p3790Var -var p3824Var = "thunk from >" -var p3824 = &p3824Var -var p3858Var = "thunk from >" -var p3858 = &p3858Var -var p3892Var = "thunk from >" -var p3892 = &p3892Var -var p3926Var = "thunk from >" -var p3926 = &p3926Var -var p3960Var = "thunk from >" +var p3737Var = "function " +var p3737 = &p3737Var +var p3743Var = "thunk from >" +var p3743 = &p3743Var +var p3753Var = "object " +var p3753 = &p3753Var +var p3755Var = "object " +var p3755 = &p3755Var +var p3779Var = "object " +var p3779 = &p3779Var +var p3783Var = "object " +var p3783 = &p3783Var +var p3786Var = "object " +var p3786 = &p3786Var +var p3789Var = "object " +var p3789 = &p3789Var +var p3792Var = "object " +var p3792 = &p3792Var +var p3795Var = "object " +var p3795 = &p3795Var +var p3798Var = "object " +var p3798 = &p3798Var +var p3801Var = "object " +var p3801 = &p3801Var +var p3806Var = "thunk from >" +var p3806 = &p3806Var +var p3808Var = "thunk from >" +var p3808 = &p3808Var +var p3814Var = "function " +var p3814 = &p3814Var +var p3835Var = "thunk from >" +var p3835 = &p3835Var +var p3852Var = "thunk from >" +var p3852 = &p3852Var +var p3877Var = "thunk from >" +var p3877 = &p3877Var +var p3899Var = "thunk from >" +var p3899 = &p3899Var +var p3916Var = "thunk from >" +var p3916 = &p3916Var +var p3927Var = "thunk from >" +var p3927 = &p3927Var +var p3936Var = "thunk from >>" +var p3936 = &p3936Var +var p3942Var = "function " +var p3942 = &p3942Var +var p3948Var = "thunk from >" +var p3948 = &p3948Var +var p3960Var = "object " var p3960 = &p3960Var -var p3994Var = "thunk from >" -var p3994 = &p3994Var -var p4028Var = "thunk from >" -var p4028 = &p4028Var -var p4062Var = "thunk from >" -var p4062 = &p4062Var -var p4082Var = "object " +var p3962Var = "object " +var p3962 = &p3962Var +var p3986Var = "object " +var p3986 = &p3986Var +var p3990Var = "object " +var p3990 = &p3990Var +var p3993Var = "object " +var p3993 = &p3993Var +var p3996Var = "object " +var p3996 = &p3996Var +var p3999Var = "object " +var p3999 = &p3999Var +var p4002Var = "object " +var p4002 = &p4002Var +var p4005Var = "object " +var p4005 = &p4005Var +var p4008Var = "object " +var p4008 = &p4008Var +var p4013Var = "thunk from >" +var p4013 = &p4013Var +var p4015Var = "thunk from >" +var p4015 = &p4015Var +var p4021Var = "function " +var p4021 = &p4021Var +var p4042Var = "thunk from >" +var p4042 = &p4042Var +var p4059Var = "thunk from >" +var p4059 = &p4059Var +var p4082Var = "thunk from >" var p4082 = &p4082Var -var p4093Var = "thunk from >" -var p4093 = &p4093Var -var p4102Var = "thunk from >" -var p4102 = &p4102Var -var p4106Var = "function " -var p4106 = &p4106Var -var p4121Var = "thunk from >" -var p4121 = &p4121Var -var p4129Var = "thunk from >" +var p4099Var = "thunk from >" +var p4099 = &p4099Var +var p4129Var = "object " var p4129 = &p4129Var -var p4154Var = "thunk from >" -var p4154 = &p4154Var -var p4166Var = "object " -var p4166 = &p4166Var -var 
p4174Var = "thunk from >" -var p4174 = &p4174Var -var p4178Var = "function " -var p4178 = &p4178Var -var p4193Var = "thunk from >" -var p4193 = &p4193Var -var p4201Var = "thunk from >" -var p4201 = &p4201Var -var p4257Var = "thunk from >" -var p4257 = &p4257Var -var p4261Var = "function " -var p4261 = &p4261Var -var p4276Var = "thunk from >" -var p4276 = &p4276Var -var p4284Var = "thunk from >" -var p4284 = &p4284Var -var p4334Var = "object " -var p4334 = &p4334Var -var p4361Var = "object " -var p4361 = &p4361Var -var p4388Var = "object " -var p4388 = &p4388Var -var p4415Var = "object " -var p4415 = &p4415Var -var p4442Var = "object " -var p4442 = &p4442Var -var p4469Var = "object " -var p4469 = &p4469Var -var p4496Var = "object " -var p4496 = &p4496Var -var p4523Var = "object " -var p4523 = &p4523Var -var p4550Var = "object " -var p4550 = &p4550Var -var p4577Var = "object " -var p4577 = &p4577Var -var p4604Var = "object " +var p4135Var = "thunk from >" +var p4135 = &p4135Var +var p4152Var = "thunk from >" +var p4152 = &p4152Var +var p4158Var = "object " +var p4158 = &p4158Var +var p4160Var = "object " +var p4160 = &p4160Var +var p4184Var = "object " +var p4184 = &p4184Var +var p4188Var = "object " +var p4188 = &p4188Var +var p4191Var = "object " +var p4191 = &p4191Var +var p4194Var = "object " +var p4194 = &p4194Var +var p4197Var = "object " +var p4197 = &p4197Var +var p4200Var = "object " +var p4200 = &p4200Var +var p4203Var = "object " +var p4203 = &p4203Var +var p4206Var = "object " +var p4206 = &p4206Var +var p4211Var = "thunk from >" +var p4211 = &p4211Var +var p4213Var = "thunk from >" +var p4213 = &p4213Var +var p4219Var = "function " +var p4219 = &p4219Var +var p4223Var = "thunk from >" +var p4223 = &p4223Var +var p4227Var = "function " +var p4227 = &p4227Var +var p4242Var = "thunk from >" +var p4242 = &p4242Var +var p4269Var = "thunk from >" +var p4269 = &p4269Var +var p4301Var = "thunk from >" +var p4301 = &p4301Var +var p4316Var = "thunk from >" +var p4316 = &p4316Var +var p4333Var = "thunk from >" +var p4333 = &p4333Var +var p4342Var = "thunk from >>" +var p4342 = &p4342Var +var p4355Var = "thunk from >>" +var p4355 = &p4355Var +var p4371Var = "thunk from >" +var p4371 = &p4371Var +var p4395Var = "thunk from >" +var p4395 = &p4395Var +var p4438Var = "thunk from >" +var p4438 = &p4438Var +var p4455Var = "thunk from >" +var p4455 = &p4455Var +var p4476Var = "thunk from >" +var p4476 = &p4476Var +var p4485Var = "thunk from >" +var p4485 = &p4485Var +var p4509Var = "thunk from >" +var p4509 = &p4509Var +var p4518Var = "thunk from >" +var p4518 = &p4518Var +var p4537Var = "thunk from >" +var p4537 = &p4537Var +var p4543Var = "object " +var p4543 = &p4543Var +var p4545Var = "object " +var p4545 = &p4545Var +var p4569Var = "object " +var p4569 = &p4569Var +var p4573Var = "object " +var p4573 = &p4573Var +var p4576Var = "object " +var p4576 = &p4576Var +var p4579Var = "object " +var p4579 = &p4579Var +var p4582Var = "object " +var p4582 = &p4582Var +var p4585Var = "object " +var p4585 = &p4585Var +var p4588Var = "object " +var p4588 = &p4588Var +var p4591Var = "object " +var p4591 = &p4591Var +var p4596Var = "thunk from >" +var p4596 = &p4596Var +var p4598Var = "thunk from >" +var p4598 = &p4598Var +var p4604Var = "function " var p4604 = &p4604Var -var p4631Var = "object " -var p4631 = &p4631Var -var p4658Var = "object " -var p4658 = &p4658Var -var p4678Var = "thunk from >" -var p4678 = &p4678Var -var p4682Var = "function " -var p4682 = &p4682Var -var p4697Var = "thunk from >" -var 
p4697 = &p4697Var -var p4705Var = "thunk from >" -var p4705 = &p4705Var -var p4711Var = "thunk from from >>" -var p4711 = &p4711Var -var p4719Var = "thunk from >" -var p4719 = &p4719Var -var p4725Var = "thunk from from >>" -var p4725 = &p4725Var -var p4736Var = "thunk from >" -var p4736 = &p4736Var -var p4742Var = "thunk from from >>" -var p4742 = &p4742Var -var p4753Var = "thunk from >" -var p4753 = &p4753Var -var p4759Var = "thunk from from >>" -var p4759 = &p4759Var -var p4770Var = "thunk from >" -var p4770 = &p4770Var -var p4776Var = "thunk from from >>" -var p4776 = &p4776Var -var p4787Var = "thunk from >" -var p4787 = &p4787Var -var p4793Var = "thunk from from >>" -var p4793 = &p4793Var -var p4802Var = "object " -var p4802 = &p4802Var -var p4813Var = "object " -var p4813 = &p4813Var -var p4852Var = "thunk from >" -var p4852 = &p4852Var -var p4856Var = "function " -var p4856 = &p4856Var -var p4871Var = "thunk from >" +var p4613Var = "thunk from >" +var p4613 = &p4613Var +var p4621Var = "thunk from >>" +var p4621 = &p4621Var +var p4626Var = "object " +var p4626 = &p4626Var +var p4628Var = "object " +var p4628 = &p4628Var +var p4652Var = "object " +var p4652 = &p4652Var +var p4656Var = "object " +var p4656 = &p4656Var +var p4659Var = "object " +var p4659 = &p4659Var +var p4662Var = "object " +var p4662 = &p4662Var +var p4665Var = "object " +var p4665 = &p4665Var +var p4668Var = "object " +var p4668 = &p4668Var +var p4671Var = "object " +var p4671 = &p4671Var +var p4674Var = "object " +var p4674 = &p4674Var +var p4679Var = "thunk from >" +var p4679 = &p4679Var +var p4681Var = "thunk from >" +var p4681 = &p4681Var +var p4687Var = "function " +var p4687 = &p4687Var +var p4698Var = "thunk from >" +var p4698 = &p4698Var +var p4713Var = "thunk from >" +var p4713 = &p4713Var +var p4724Var = "thunk from >" +var p4724 = &p4724Var +var p4740Var = "thunk from >>" +var p4740 = &p4740Var +var p4749Var = "thunk from >>>" +var p4749 = &p4749Var +var p4773Var = "thunk from >" +var p4773 = &p4773Var +var p4779Var = "object " +var p4779 = &p4779Var +var p4781Var = "object " +var p4781 = &p4781Var +var p4805Var = "object " +var p4805 = &p4805Var +var p4809Var = "object " +var p4809 = &p4809Var +var p4812Var = "object " +var p4812 = &p4812Var +var p4815Var = "object " +var p4815 = &p4815Var +var p4818Var = "object " +var p4818 = &p4818Var +var p4821Var = "object " +var p4821 = &p4821Var +var p4824Var = "object " +var p4824 = &p4824Var +var p4827Var = "object " +var p4827 = &p4827Var +var p4832Var = "thunk from >" +var p4832 = &p4832Var +var p4834Var = "thunk from >" +var p4834 = &p4834Var +var p4840Var = "function " +var p4840 = &p4840Var +var p4844Var = "thunk from >" +var p4844 = &p4844Var +var p4848Var = "function " +var p4848 = &p4848Var +var p4863Var = "thunk from >" +var p4863 = &p4863Var +var p4871Var = "thunk from >" var p4871 = &p4871Var -var p4881Var = "thunk from >" -var p4881 = &p4881Var -var p4887Var = "thunk from >" -var p4887 = &p4887Var -var p4910Var = "thunk from >" -var p4910 = &p4910Var -var p4916Var = "thunk from from >>" -var p4916 = &p4916Var -var p4929Var = "thunk from >" -var p4929 = &p4929Var -var p4944Var = "thunk from >>" -var p4944 = &p4944Var -var p4958Var = "thunk from >" -var p4958 = &p4958Var -var p4977Var = "thunk from >" -var p4977 = &p4977Var -var p4983Var = "thunk from from >>" -var p4983 = &p4983Var -var p4992Var = "thunk from >" -var p4992 = &p4992Var -var p4995Var = "function " -var p4995 = &p4995Var -var p4999Var = "thunk from >" -var p4999 = &p4999Var -var p5003Var 
= "function " -var p5003 = &p5003Var -var p5018Var = "thunk from >" -var p5018 = &p5018Var -var p5035Var = "thunk from >" -var p5035 = &p5035Var -var p5042Var = "thunk from >" -var p5042 = &p5042Var -var p5046Var = "function " -var p5046 = &p5046Var -var p5054Var = "thunk from >" -var p5054 = &p5054Var -var p5067Var = "thunk from >>" -var p5067 = &p5067Var -var p5077Var = "thunk from >" -var p5077 = &p5077Var -var p5081Var = "function " -var p5081 = &p5081Var -var p5091Var = "thunk from >" -var p5091 = &p5091Var -var p5104Var = "thunk from >>" -var p5104 = &p5104Var -var p5112Var = "thunk from >" -var p5112 = &p5112Var -var p5116Var = "function " -var p5116 = &p5116Var -var p5120Var = "thunk from >" -var p5120 = &p5120Var -var p5129Var = "thunk from from >>" -var p5129 = &p5129Var -var p5135Var = "thunk from >" -var p5135 = &p5135Var -var p5139Var = "function " -var p5139 = &p5139Var -var p5162Var = "thunk from >" -var p5162 = &p5162Var -var p5171Var = "thunk from >>" -var p5171 = &p5171Var -var p5193Var = "thunk from >" -var p5193 = &p5193Var -var p5212Var = "thunk from from >>" -var p5212 = &p5212Var -var p5222Var = "thunk from from >>" +var p4894Var = "thunk from >" +var p4894 = &p4894Var +var p4898Var = "function " +var p4898 = &p4898Var +var p4913Var = "thunk from >" +var p4913 = &p4913Var +var p4921Var = "thunk from >" +var p4921 = &p4921Var +var p4948Var = "thunk from >" +var p4948 = &p4948Var +var p4966Var = "object " +var p4966 = &p4966Var +var p4980Var = "thunk from >" +var p4980 = &p4980Var +var p4993Var = "object " +var p4993 = &p4993Var +var p5001Var = "thunk from >" +var p5001 = &p5001Var +var p5005Var = "function " +var p5005 = &p5005Var +var p5009Var = "thunk from >" +var p5009 = &p5009Var +var p5013Var = "function " +var p5013 = &p5013Var +var p5028Var = "thunk from >" +var p5028 = &p5028Var +var p5036Var = "thunk from >" +var p5036 = &p5036Var +var p5061Var = "thunk from >" +var p5061 = &p5061Var +var p5076Var = "object " +var p5076 = &p5076Var +var p5096Var = "thunk from >" +var p5096 = &p5096Var +var p5111Var = "object " +var p5111 = &p5111Var +var p5131Var = "thunk from >" +var p5131 = &p5131Var +var p5146Var = "object " +var p5146 = &p5146Var +var p5166Var = "thunk from >" +var p5166 = &p5166Var +var p5181Var = "object " +var p5181 = &p5181Var +var p5201Var = "thunk from >" +var p5201 = &p5201Var +var p5216Var = "object " +var p5216 = &p5216Var +var p5222Var = "object " var p5222 = &p5222Var -var p5231Var = "thunk from from >>>" -var p5231 = &p5231Var -var p5237Var = "thunk from >" -var p5237 = &p5237Var -var p5246Var = "thunk from >" -var p5246 = &p5246Var -var p5268Var = "thunk from >" -var p5268 = &p5268Var -var p5277Var = "thunk from from >>" -var p5277 = &p5277Var -var p5285Var = "thunk from >" -var p5285 = &p5285Var -var p5291Var = "thunk from from >>" -var p5291 = &p5291Var -var p5320Var = "thunk from >" -var p5320 = &p5320Var -var p5324Var = "function " -var p5324 = &p5324Var -var p5328Var = "thunk from >" -var p5328 = &p5328Var -var p5333Var = "thunk from from >>" -var p5333 = &p5333Var -var p5350Var = "thunk from from >>" -var p5350 = &p5350Var -var p5359Var = "thunk from from >>" -var p5359 = &p5359Var -var p5369Var = "thunk from >" -var p5369 = &p5369Var -var p5378Var = "thunk from from >>" -var p5378 = &p5378Var -var p5384Var = "thunk from >" -var p5384 = &p5384Var -var p5388Var = "function " -var p5388 = &p5388Var -var p5410Var = "thunk from >" -var p5410 = &p5410Var -var p5419Var = "thunk from >>" -var p5419 = &p5419Var -var p5443Var = "thunk from >" 
-var p5443 = &p5443Var -var p5462Var = "thunk from from >>" +var p5233Var = "thunk from >" +var p5233 = &p5233Var +var p5241Var = "object " +var p5241 = &p5241Var +var p5254Var = "thunk from >" +var p5254 = &p5254Var +var p5258Var = "function " +var p5258 = &p5258Var +var p5275Var = "thunk from >" +var p5275 = &p5275Var +var p5297Var = "object " +var p5297 = &p5297Var +var p5308Var = "thunk from >" +var p5308 = &p5308Var +var p5312Var = "function " +var p5312 = &p5312Var +var p5327Var = "thunk from >" +var p5327 = &p5327Var +var p5335Var = "thunk from >" +var p5335 = &p5335Var +var p5360Var = "thunk from >" +var p5360 = &p5360Var +var p5394Var = "thunk from >" +var p5394 = &p5394Var +var p5428Var = "thunk from >" +var p5428 = &p5428Var +var p5462Var = "thunk from >" var p5462 = &p5462Var -var p5472Var = "thunk from from >>" -var p5472 = &p5472Var -var p5481Var = "thunk from from >>>" -var p5481 = &p5481Var -var p5487Var = "thunk from >" -var p5487 = &p5487Var -var p5496Var = "thunk from >" +var p5496Var = "thunk from >" var p5496 = &p5496Var -var p5526Var = "thunk from >" -var p5526 = &p5526Var -var p5535Var = "thunk from from >>" -var p5535 = &p5535Var -var p5543Var = "thunk from >" -var p5543 = &p5543Var -var p5562Var = "thunk from from >>" -var p5562 = &p5562Var -var p5591Var = "thunk from >" -var p5591 = &p5591Var -var p5595Var = "function " -var p5595 = &p5595Var -var p5599Var = "thunk from >" -var p5599 = &p5599Var -var p5603Var = "function " -var p5603 = &p5603Var -var p5634Var = "thunk from >" -var p5634 = &p5634Var -var p5650Var = "thunk from >" -var p5650 = &p5650Var -var p5664Var = "thunk from >" -var p5664 = &p5664Var -var p5677Var = "thunk from >>" -var p5677 = &p5677Var -var p5684Var = "thunk from >" -var p5684 = &p5684Var -var p5688Var = "function " -var p5688 = &p5688Var -var p5692Var = "thunk from >" -var p5692 = &p5692Var -var p5701Var = "thunk from from >>" -var p5701 = &p5701Var -var p5707Var = "thunk from >" -var p5707 = &p5707Var -var p5716Var = "thunk from from >>" -var p5716 = &p5716Var -var p5722Var = "thunk from >" -var p5722 = &p5722Var -var p5747Var = "thunk from >" -var p5747 = &p5747Var -var p5761Var = "thunk from >" -var p5761 = &p5761Var -var p5767Var = "thunk from from >>" -var p5767 = &p5767Var -var p5778Var = "thunk from from >>>" +var p5530Var = "thunk from >" +var p5530 = &p5530Var +var p5564Var = "thunk from >" +var p5564 = &p5564Var +var p5598Var = "thunk from >" +var p5598 = &p5598Var +var p5632Var = "thunk from >" +var p5632 = &p5632Var +var p5666Var = "thunk from >" +var p5666 = &p5666Var +var p5686Var = "object " +var p5686 = &p5686Var +var p5697Var = "thunk from >" +var p5697 = &p5697Var +var p5706Var = "thunk from >" +var p5706 = &p5706Var +var p5710Var = "function " +var p5710 = &p5710Var +var p5725Var = "thunk from >" +var p5725 = &p5725Var +var p5733Var = "thunk from >" +var p5733 = &p5733Var +var p5758Var = "thunk from >" +var p5758 = &p5758Var +var p5770Var = "object " +var p5770 = &p5770Var +var p5778Var = "thunk from >" var p5778 = &p5778Var -var p5818Var = "thunk from >" -var p5818 = &p5818Var -var p5827Var = "thunk from from >>" -var p5827 = &p5827Var -var p5846Var = "thunk from from >>>" -var p5846 = &p5846Var -var p5865Var = "thunk from >" +var p5782Var = "function " +var p5782 = &p5782Var +var p5797Var = "thunk from >" +var p5797 = &p5797Var +var p5805Var = "thunk from >" +var p5805 = &p5805Var +var p5861Var = "thunk from >" +var p5861 = &p5861Var +var p5865Var = "function " var p5865 = &p5865Var -var p5871Var = "thunk from from >>" 
-var p5871 = &p5871Var -var p5899Var = "thunk from >" -var p5899 = &p5899Var -var p5909Var = "thunk from >" -var p5909 = &p5909Var -var p5913Var = "function " -var p5913 = &p5913Var -var p5917Var = "thunk from >" -var p5917 = &p5917Var -var p5926Var = "thunk from from >>" -var p5926 = &p5926Var -var p5937Var = "thunk from from >>>" -var p5937 = &p5937Var -var p5946Var = "thunk from from >>>>" -var p5946 = &p5946Var -var p5957Var = "thunk from from >>>" -var p5957 = &p5957Var -var p5962Var = "thunk from >" -var p5962 = &p5962Var -var p5976Var = "thunk from from >>" -var p5976 = &p5976Var -var p5988Var = "thunk from >" -var p5988 = &p5988Var -var p6001Var = "thunk from from >>" -var p6001 = &p6001Var -var p6008Var = "thunk from >" -var p6008 = &p6008Var -var p6021Var = "thunk from from >>" -var p6021 = &p6021Var -var p6031Var = "thunk from >" -var p6031 = &p6031Var -var p6051Var = "thunk from >" -var p6051 = &p6051Var -var p6055Var = "function " -var p6055 = &p6055Var -var p6059Var = "thunk from >" -var p6059 = &p6059Var -var p6068Var = "thunk from >" -var p6068 = &p6068Var -var p6090Var = "thunk from >" -var p6090 = &p6090Var -var p6112Var = "thunk from >" -var p6112 = &p6112Var -var p6156Var = "thunk from >" -var p6156 = &p6156Var -var p6195Var = "thunk from >" -var p6195 = &p6195Var -var p6219Var = "thunk from >" -var p6219 = &p6219Var -var p6227Var = "thunk from >" -var p6227 = &p6227Var -var p6282Var = "thunk from >" +var p5880Var = "thunk from >" +var p5880 = &p5880Var +var p5888Var = "thunk from >" +var p5888 = &p5888Var +var p5938Var = "object " +var p5938 = &p5938Var +var p5965Var = "object " +var p5965 = &p5965Var +var p5992Var = "object " +var p5992 = &p5992Var +var p6019Var = "object " +var p6019 = &p6019Var +var p6046Var = "object " +var p6046 = &p6046Var +var p6073Var = "object " +var p6073 = &p6073Var +var p6100Var = "object " +var p6100 = &p6100Var +var p6127Var = "object " +var p6127 = &p6127Var +var p6154Var = "object " +var p6154 = &p6154Var +var p6181Var = "object " +var p6181 = &p6181Var +var p6208Var = "object " +var p6208 = &p6208Var +var p6235Var = "object " +var p6235 = &p6235Var +var p6262Var = "object " +var p6262 = &p6262Var +var p6282Var = "thunk from >" var p6282 = &p6282Var -var p6306Var = "thunk from >" -var p6306 = &p6306Var -var p6312Var = "thunk from >" -var p6312 = &p6312Var -var p6327Var = "thunk from >" -var p6327 = &p6327Var -var p6383Var = "thunk from >" -var p6383 = &p6383Var -var p6407Var = "thunk from >" -var p6407 = &p6407Var -var p6415Var = "thunk from >" -var p6415 = &p6415Var -var p6478Var = "thunk from >" -var p6478 = &p6478Var -var p6502Var = "thunk from >" -var p6502 = &p6502Var -var p6510Var = "thunk from >" -var p6510 = &p6510Var -var p6569Var = "thunk from >" -var p6569 = &p6569Var -var p6593Var = "thunk from >" -var p6593 = &p6593Var -var p6601Var = "thunk from >" -var p6601 = &p6601Var -var p6665Var = "thunk from >" -var p6665 = &p6665Var -var p6689Var = "thunk from >" -var p6689 = &p6689Var -var p6695Var = "thunk from >" +var p6286Var = "function " +var p6286 = &p6286Var +var p6301Var = "thunk from >" +var p6301 = &p6301Var +var p6309Var = "thunk from >" +var p6309 = &p6309Var +var p6315Var = "thunk from from >>" +var p6315 = &p6315Var +var p6323Var = "thunk from >" +var p6323 = &p6323Var +var p6329Var = "thunk from from >>" +var p6329 = &p6329Var +var p6340Var = "thunk from >" +var p6340 = &p6340Var +var p6346Var = "thunk from from >>" +var p6346 = &p6346Var +var p6357Var = "thunk from >" +var p6357 = &p6357Var +var p6363Var = "thunk 
from from >>" +var p6363 = &p6363Var +var p6374Var = "thunk from >" +var p6374 = &p6374Var +var p6380Var = "thunk from from >>" +var p6380 = &p6380Var +var p6391Var = "thunk from >" +var p6391 = &p6391Var +var p6397Var = "thunk from from >>" +var p6397 = &p6397Var +var p6406Var = "object " +var p6406 = &p6406Var +var p6417Var = "object " +var p6417 = &p6417Var +var p6456Var = "thunk from >" +var p6456 = &p6456Var +var p6460Var = "function " +var p6460 = &p6460Var +var p6475Var = "thunk from >" +var p6475 = &p6475Var +var p6485Var = "thunk from >" +var p6485 = &p6485Var +var p6491Var = "thunk from >" +var p6491 = &p6491Var +var p6514Var = "thunk from >" +var p6514 = &p6514Var +var p6520Var = "thunk from from >>" +var p6520 = &p6520Var +var p6533Var = "thunk from >" +var p6533 = &p6533Var +var p6548Var = "thunk from >>" +var p6548 = &p6548Var +var p6562Var = "thunk from >" +var p6562 = &p6562Var +var p6581Var = "thunk from >" +var p6581 = &p6581Var +var p6587Var = "thunk from from >>" +var p6587 = &p6587Var +var p6596Var = "thunk from >" +var p6596 = &p6596Var +var p6599Var = "function " +var p6599 = &p6599Var +var p6603Var = "thunk from >" +var p6603 = &p6603Var +var p6607Var = "function " +var p6607 = &p6607Var +var p6622Var = "thunk from >" +var p6622 = &p6622Var +var p6639Var = "thunk from >" +var p6639 = &p6639Var +var p6646Var = "thunk from >" +var p6646 = &p6646Var +var p6650Var = "function " +var p6650 = &p6650Var +var p6658Var = "thunk from >" +var p6658 = &p6658Var +var p6671Var = "thunk from >>" +var p6671 = &p6671Var +var p6681Var = "thunk from >" +var p6681 = &p6681Var +var p6685Var = "function " +var p6685 = &p6685Var +var p6695Var = "thunk from >" var p6695 = &p6695Var -var p6704Var = "thunk from from >>" -var p6704 = &p6704Var -var p6715Var = "thunk from from >>>" -var p6715 = &p6715Var -var p6724Var = "thunk from from >>>>" +var p6708Var = "thunk from >>" +var p6708 = &p6708Var +var p6716Var = "thunk from >" +var p6716 = &p6716Var +var p6720Var = "function " +var p6720 = &p6720Var +var p6724Var = "thunk from >" var p6724 = &p6724Var -var p6735Var = "thunk from from >>>" -var p6735 = &p6735Var -var p6758Var = "thunk from >" -var p6758 = &p6758Var -var p6796Var = "thunk from >" -var p6796 = &p6796Var -var p6805Var = "thunk from from >>" -var p6805 = &p6805Var -var p6817Var = "thunk from >" -var p6817 = &p6817Var -var p6882Var = "thunk from >" -var p6882 = &p6882Var -var p6894Var = "thunk from >" -var p6894 = &p6894Var -var p6915Var = "thunk from >" -var p6915 = &p6915Var -var p6937Var = "thunk from >" +var p6733Var = "thunk from from >>" +var p6733 = &p6733Var +var p6739Var = "thunk from >" +var p6739 = &p6739Var +var p6743Var = "function " +var p6743 = &p6743Var +var p6766Var = "thunk from >" +var p6766 = &p6766Var +var p6775Var = "thunk from >>" +var p6775 = &p6775Var +var p6797Var = "thunk from >" +var p6797 = &p6797Var +var p6816Var = "thunk from from >>" +var p6816 = &p6816Var +var p6826Var = "thunk from from >>" +var p6826 = &p6826Var +var p6835Var = "thunk from from >>>" +var p6835 = &p6835Var +var p6841Var = "thunk from >" +var p6841 = &p6841Var +var p6850Var = "thunk from >" +var p6850 = &p6850Var +var p6872Var = "thunk from >" +var p6872 = &p6872Var +var p6881Var = "thunk from from >>" +var p6881 = &p6881Var +var p6889Var = "thunk from >" +var p6889 = &p6889Var +var p6895Var = "thunk from from >>" +var p6895 = &p6895Var +var p6924Var = "thunk from >" +var p6924 = &p6924Var +var p6928Var = "function " +var p6928 = &p6928Var +var p6932Var = "thunk from >" +var p6932 = 
&p6932Var +var p6937Var = "thunk from from >>" var p6937 = &p6937Var -var p6956Var = "thunk from >" -var p6956 = &p6956Var -var p6972Var = "thunk from >" -var p6972 = &p6972Var -var p6988Var = "thunk from >" +var p6954Var = "thunk from from >>" +var p6954 = &p6954Var +var p6963Var = "thunk from from >>" +var p6963 = &p6963Var +var p6973Var = "thunk from >" +var p6973 = &p6973Var +var p6982Var = "thunk from from >>" +var p6982 = &p6982Var +var p6988Var = "thunk from >" var p6988 = &p6988Var -var p6992Var = "function " +var p6992Var = "function " var p6992 = &p6992Var -var p7007Var = "thunk from >" -var p7007 = &p7007Var -var p7024Var = "thunk from >" -var p7024 = &p7024Var -var p7044Var = "thunk from >" -var p7044 = &p7044Var -var p7055Var = "thunk from >" -var p7055 = &p7055Var -var p7080Var = "thunk from >" -var p7080 = &p7080Var -var p7089Var = "thunk from >" -var p7089 = &p7089Var -var p7110Var = "thunk from >" -var p7110 = &p7110Var -var p7131Var = "object " -var p7131 = &p7131Var -var p7152Var = "thunk from >" -var p7152 = &p7152Var -var p7168Var = "thunk from >" -var p7168 = &p7168Var -var p7181Var = "object " -var p7181 = &p7181Var -var p7193Var = "thunk from >" -var p7193 = &p7193Var -var p7214Var = "object " -var p7214 = &p7214Var -var p7241Var = "thunk from >" -var p7241 = &p7241Var -var p7257Var = "thunk from >" -var p7257 = &p7257Var -var p7273Var = "object " -var p7273 = &p7273Var -var p7288Var = "thunk from >" +var p7014Var = "thunk from >" +var p7014 = &p7014Var +var p7023Var = "thunk from >>" +var p7023 = &p7023Var +var p7047Var = "thunk from >" +var p7047 = &p7047Var +var p7066Var = "thunk from from >>" +var p7066 = &p7066Var +var p7076Var = "thunk from from >>" +var p7076 = &p7076Var +var p7085Var = "thunk from from >>>" +var p7085 = &p7085Var +var p7091Var = "thunk from >" +var p7091 = &p7091Var +var p7100Var = "thunk from >" +var p7100 = &p7100Var +var p7130Var = "thunk from >" +var p7130 = &p7130Var +var p7139Var = "thunk from from >>" +var p7139 = &p7139Var +var p7147Var = "thunk from >" +var p7147 = &p7147Var +var p7166Var = "thunk from from >>" +var p7166 = &p7166Var +var p7195Var = "thunk from >" +var p7195 = &p7195Var +var p7199Var = "function " +var p7199 = &p7199Var +var p7203Var = "thunk from >" +var p7203 = &p7203Var +var p7207Var = "function " +var p7207 = &p7207Var +var p7238Var = "thunk from >" +var p7238 = &p7238Var +var p7254Var = "thunk from >" +var p7254 = &p7254Var +var p7268Var = "thunk from >" +var p7268 = &p7268Var +var p7281Var = "thunk from >>" +var p7281 = &p7281Var +var p7288Var = "thunk from >" var p7288 = &p7288Var -var p7297Var = "thunk from >" -var p7297 = &p7297Var -var p7312Var = "thunk from from >>" -var p7312 = &p7312Var -var p7334Var = "thunk from from >>" -var p7334 = &p7334Var -var p7340Var = "thunk from >" -var p7340 = &p7340Var -var p7363Var = "thunk from from >>" -var p7363 = &p7363Var -var p7383Var = "thunk from >" -var p7383 = &p7383Var -var p7399Var = "thunk from from >>" -var p7399 = &p7399Var -var p7413Var = "thunk from from >>" -var p7413 = &p7413Var -var p7425Var = "thunk from >" -var p7425 = &p7425Var -var p7454Var = "thunk from >" -var p7454 = &p7454Var -var p7475Var = "thunk from >" +var p7292Var = "function " +var p7292 = &p7292Var +var p7296Var = "thunk from >" +var p7296 = &p7296Var +var p7305Var = "thunk from from >>" +var p7305 = &p7305Var +var p7311Var = "thunk from >" +var p7311 = &p7311Var +var p7320Var = "thunk from from >>" +var p7320 = &p7320Var +var p7326Var = "thunk from >" +var p7326 = &p7326Var +var 
p7351Var = "thunk from >" +var p7351 = &p7351Var +var p7365Var = "thunk from >" +var p7365 = &p7365Var +var p7371Var = "thunk from from >>" +var p7371 = &p7371Var +var p7382Var = "thunk from from >>>" +var p7382 = &p7382Var +var p7422Var = "thunk from >" +var p7422 = &p7422Var +var p7431Var = "thunk from from >>" +var p7431 = &p7431Var +var p7450Var = "thunk from from >>>" +var p7450 = &p7450Var +var p7469Var = "thunk from >" +var p7469 = &p7469Var +var p7475Var = "thunk from from >>" var p7475 = &p7475Var -var p7479Var = "function " -var p7479 = &p7479Var -var p7494Var = "thunk from >" -var p7494 = &p7494Var -var p7502Var = "thunk from >" -var p7502 = &p7502Var -var p7527Var = "thunk from >" -var p7527 = &p7527Var -var p7536Var = "thunk from >" -var p7536 = &p7536Var -var p7555Var = "thunk from >" -var p7555 = &p7555Var -var p7582Var = "thunk from >" -var p7582 = &p7582Var -var p7609Var = "thunk from >" -var p7609 = &p7609Var -var p7636Var = "thunk from >" -var p7636 = &p7636Var -var p7647Var = "thunk from from >>" -var p7647 = &p7647Var -var p7668Var = "thunk from >" -var p7668 = &p7668Var -var p7691Var = "thunk from from >>" -var p7691 = &p7691Var -var p7705Var = "thunk from >" -var p7705 = &p7705Var -var p7721Var = "thunk from from >>" -var p7721 = &p7721Var -var p7732Var = "thunk from from >>" -var p7732 = &p7732Var -var p7743Var = "thunk from >" -var p7743 = &p7743Var -var p7777Var = "thunk from >" -var p7777 = &p7777Var -var p7786Var = "thunk from >" -var p7786 = &p7786Var -var p7812Var = "thunk from >" -var p7812 = &p7812Var -var p7821Var = "thunk from >" -var p7821 = &p7821Var -var p7833Var = "thunk from >" -var p7833 = &p7833Var -var p7839Var = "thunk from >>" -var p7839 = &p7839Var -var p7850Var = "function " -var p7850 = &p7850Var -var p7854Var = "thunk from >" -var p7854 = &p7854Var -var p7858Var = "function " -var p7858 = &p7858Var -var p7873Var = "thunk from >" -var p7873 = &p7873Var -var p7883Var = "thunk from >>" -var p7883 = &p7883Var -var p7902Var = "thunk from >" -var p7902 = &p7902Var -var p7919Var = "thunk from >>" -var p7919 = &p7919Var -var p7928Var = "function " -var p7928 = &p7928Var -var p7932Var = "thunk from >" -var p7932 = &p7932Var -var p7936Var = "function " -var p7936 = &p7936Var -var p7951Var = "thunk from >" -var p7951 = &p7951Var -var p7961Var = "thunk from >" -var p7961 = &p7961Var -var p7971Var = "thunk from >>" -var p7971 = &p7971Var -var p7990Var = "thunk from >" -var p7990 = &p7990Var -var p8003Var = "function " -var p8003 = &p8003Var -var p8024Var = "thunk from >" -var p8024 = &p8024Var -var p8041Var = "thunk from >" -var p8041 = &p8041Var -var p8064Var = "thunk from >" -var p8064 = &p8064Var -var p8081Var = "thunk from >" -var p8081 = &p8081Var -var p8104Var = "thunk from >" -var p8104 = &p8104Var -var p8121Var = "thunk from >" -var p8121 = &p8121Var -var p8132Var = "thunk from >" -var p8132 = &p8132Var -var p8143Var = "thunk from >>" -var p8143 = &p8143Var -var p8153Var = "function " -var p8153 = &p8153Var -var p8188Var = "function " -var p8188 = &p8188Var -var p8209Var = "thunk from >" -var p8209 = &p8209Var -var p8226Var = "thunk from >" -var p8226 = &p8226Var -var p8247Var = "function " -var p8247 = &p8247Var -var p8268Var = "thunk from >" -var p8268 = &p8268Var -var p8285Var = "thunk from >" -var p8285 = &p8285Var -var p8311Var = "function " -var p8311 = &p8311Var -var p8332Var = "thunk from >" -var p8332 = &p8332Var -var p8349Var = "thunk from >" -var p8349 = &p8349Var -var p8372Var = "thunk from >" -var p8372 = &p8372Var -var p8389Var = 
"thunk from >" -var p8389 = &p8389Var -var p8409Var = "function " +var p7503Var = "thunk from >" +var p7503 = &p7503Var +var p7513Var = "thunk from >" +var p7513 = &p7513Var +var p7517Var = "function " +var p7517 = &p7517Var +var p7521Var = "thunk from >" +var p7521 = &p7521Var +var p7530Var = "thunk from from >>" +var p7530 = &p7530Var +var p7541Var = "thunk from from >>>" +var p7541 = &p7541Var +var p7550Var = "thunk from from >>>>" +var p7550 = &p7550Var +var p7561Var = "thunk from from >>>" +var p7561 = &p7561Var +var p7566Var = "thunk from >" +var p7566 = &p7566Var +var p7580Var = "thunk from from >>" +var p7580 = &p7580Var +var p7592Var = "thunk from >" +var p7592 = &p7592Var +var p7605Var = "thunk from from >>" +var p7605 = &p7605Var +var p7612Var = "thunk from >" +var p7612 = &p7612Var +var p7625Var = "thunk from from >>" +var p7625 = &p7625Var +var p7635Var = "thunk from >" +var p7635 = &p7635Var +var p7655Var = "thunk from >" +var p7655 = &p7655Var +var p7659Var = "function " +var p7659 = &p7659Var +var p7663Var = "thunk from >" +var p7663 = &p7663Var +var p7672Var = "thunk from >" +var p7672 = &p7672Var +var p7694Var = "thunk from >" +var p7694 = &p7694Var +var p7716Var = "thunk from >" +var p7716 = &p7716Var +var p7760Var = "thunk from >" +var p7760 = &p7760Var +var p7799Var = "thunk from >" +var p7799 = &p7799Var +var p7823Var = "thunk from >" +var p7823 = &p7823Var +var p7831Var = "thunk from >" +var p7831 = &p7831Var +var p7886Var = "thunk from >" +var p7886 = &p7886Var +var p7910Var = "thunk from >" +var p7910 = &p7910Var +var p7916Var = "thunk from >" +var p7916 = &p7916Var +var p7931Var = "thunk from >" +var p7931 = &p7931Var +var p7987Var = "thunk from >" +var p7987 = &p7987Var +var p8011Var = "thunk from >" +var p8011 = &p8011Var +var p8019Var = "thunk from >" +var p8019 = &p8019Var +var p8082Var = "thunk from >" +var p8082 = &p8082Var +var p8106Var = "thunk from >" +var p8106 = &p8106Var +var p8114Var = "thunk from >" +var p8114 = &p8114Var +var p8173Var = "thunk from >" +var p8173 = &p8173Var +var p8197Var = "thunk from >" +var p8197 = &p8197Var +var p8205Var = "thunk from >" +var p8205 = &p8205Var +var p8269Var = "thunk from >" +var p8269 = &p8269Var +var p8293Var = "thunk from >" +var p8293 = &p8293Var +var p8299Var = "thunk from >" +var p8299 = &p8299Var +var p8308Var = "thunk from from >>" +var p8308 = &p8308Var +var p8319Var = "thunk from from >>>" +var p8319 = &p8319Var +var p8328Var = "thunk from from >>>>" +var p8328 = &p8328Var +var p8339Var = "thunk from from >>>" +var p8339 = &p8339Var +var p8362Var = "thunk from >" +var p8362 = &p8362Var +var p8400Var = "thunk from >" +var p8400 = &p8400Var +var p8409Var = "thunk from from >>" var p8409 = &p8409Var -var p8430Var = "thunk from >" -var p8430 = &p8430Var -var p8447Var = "thunk from >" -var p8447 = &p8447Var -var p8470Var = "thunk from >" -var p8470 = &p8470Var -var p8487Var = "thunk from >" -var p8487 = &p8487Var -var p8507Var = "function " -var p8507 = &p8507Var -var p8516Var = "thunk from >" -var p8516 = &p8516Var -var p8519Var = "function " +var p8421Var = "thunk from >" +var p8421 = &p8421Var +var p8486Var = "thunk from >" +var p8486 = &p8486Var +var p8498Var = "thunk from >" +var p8498 = &p8498Var +var p8519Var = "thunk from >" var p8519 = &p8519Var -var p8534Var = "function " -var p8534 = &p8534Var -var p8538Var = "thunk from >" -var p8538 = &p8538Var -var p8542Var = "function " -var p8542 = &p8542Var -var p8551Var = "thunk from >" -var p8551 = &p8551Var -var p8567Var = "thunk from >>" -var p8567 = 
&p8567Var -var p8571Var = "thunk from >>>" -var p8571 = &p8571Var -var p8596Var = "thunk from >>>" +var p8541Var = "thunk from >" +var p8541 = &p8541Var +var p8560Var = "thunk from >" +var p8560 = &p8560Var +var p8576Var = "thunk from >" +var p8576 = &p8576Var +var p8592Var = "thunk from >" +var p8592 = &p8592Var +var p8596Var = "function " var p8596 = &p8596Var -var p8622Var = "thunk from >>>" -var p8622 = &p8622Var -var p8627Var = "thunk from >>>>" -var p8627 = &p8627Var -var p8645Var = "thunk from >>>" -var p8645 = &p8645Var -var p8650Var = "thunk from >>>>" -var p8650 = &p8650Var -var p8663Var = "thunk from >>" -var p8663 = &p8663Var -var p8669Var = "thunk from >" -var p8669 = &p8669Var -var p8673Var = "function " -var p8673 = &p8673Var -var p8687Var = "thunk from >" -var p8687 = &p8687Var -var p8692Var = "thunk from >>" -var p8692 = &p8692Var -var p8700Var = "thunk from >" -var p8700 = &p8700Var -var p8703Var = "thunk from >" -var p8703 = &p8703Var -var p8714Var = "thunk from from >>" +var p8611Var = "thunk from >" +var p8611 = &p8611Var +var p8628Var = "thunk from >" +var p8628 = &p8628Var +var p8648Var = "thunk from >" +var p8648 = &p8648Var +var p8659Var = "thunk from >" +var p8659 = &p8659Var +var p8684Var = "thunk from >" +var p8684 = &p8684Var +var p8693Var = "thunk from >" +var p8693 = &p8693Var +var p8714Var = "thunk from >" var p8714 = &p8714Var -var p8723Var = "thunk from from >>" -var p8723 = &p8723Var -var p8744Var = "thunk from from >>" -var p8744 = &p8744Var -var p8750Var = "thunk from from >>>" -var p8750 = &p8750Var -var p8762Var = "thunk from >" -var p8762 = &p8762Var -var p8771Var = "thunk from from >>" -var p8771 = &p8771Var -var p8785Var = "thunk from >" +var p8735Var = "object " +var p8735 = &p8735Var +var p8756Var = "thunk from >" +var p8756 = &p8756Var +var p8772Var = "thunk from >" +var p8772 = &p8772Var +var p8785Var = "object " var p8785 = &p8785Var -var p8801Var = "thunk from >>" -var p8801 = &p8801Var -var p8806Var = "thunk from >>" -var p8806 = &p8806Var -var p8813Var = "function " -var p8813 = &p8813Var -var p8817Var = "thunk from >" -var p8817 = &p8817Var -var p8826Var = "thunk from from >>" -var p8826 = &p8826Var -var p8832Var = "thunk from >" -var p8832 = &p8832Var -var p8836Var = "function " -var p8836 = &p8836Var -var p8938Var = "thunk from >" +var p8797Var = "thunk from >" +var p8797 = &p8797Var +var p8818Var = "object " +var p8818 = &p8818Var +var p8845Var = "thunk from >" +var p8845 = &p8845Var +var p8861Var = "thunk from >" +var p8861 = &p8861Var +var p8877Var = "object " +var p8877 = &p8877Var +var p8892Var = "thunk from >" +var p8892 = &p8892Var +var p8901Var = "thunk from >" +var p8901 = &p8901Var +var p8916Var = "thunk from from >>" +var p8916 = &p8916Var +var p8938Var = "thunk from from >>" var p8938 = &p8938Var -var p8947Var = "thunk from from >>" -var p8947 = &p8947Var -var p8983Var = "thunk from >" -var p8983 = &p8983Var -var p9005Var = "thunk from >" -var p9005 = &p9005Var -var p9021Var = "thunk from >>" -var p9021 = &p9021Var -var p9027Var = "thunk from >>>" -var p9027 = &p9027Var -var p9038Var = "thunk from >>" -var p9038 = &p9038Var -var p9046Var = "function " -var p9046 = &p9046Var -var p9055Var = "thunk from >" -var p9055 = &p9055Var -var p9063Var = "function " -var p9063 = &p9063Var -var p9067Var = "thunk from >" -var p9067 = &p9067Var -var p9076Var = "thunk from from >>" -var p9076 = &p9076Var -var p9082Var = "thunk from >" -var p9082 = &p9082Var -var p9086Var = "function " -var p9086 = &p9086Var -var p9120Var = "thunk from >" 
-var p9120 = &p9120Var -var p9136Var = "thunk from >>" -var p9136 = &p9136Var -var p9142Var = "thunk from >>>" -var p9142 = &p9142Var -var p9153Var = "thunk from >>" -var p9153 = &p9153Var -var p9161Var = "function " -var p9161 = &p9161Var -var p9165Var = "thunk from >" -var p9165 = &p9165Var -var p9174Var = "thunk from from >>" -var p9174 = &p9174Var -var p9180Var = "thunk from >" -var p9180 = &p9180Var -var p9184Var = "function " -var p9184 = &p9184Var -var p9209Var = "thunk from >" -var p9209 = &p9209Var -var p9213Var = "function " +var p8944Var = "thunk from >" +var p8944 = &p8944Var +var p8967Var = "thunk from from >>" +var p8967 = &p8967Var +var p8987Var = "thunk from >" +var p8987 = &p8987Var +var p9003Var = "thunk from from >>" +var p9003 = &p9003Var +var p9017Var = "thunk from from >>" +var p9017 = &p9017Var +var p9029Var = "thunk from >" +var p9029 = &p9029Var +var p9058Var = "thunk from >" +var p9058 = &p9058Var +var p9079Var = "thunk from >" +var p9079 = &p9079Var +var p9083Var = "function " +var p9083 = &p9083Var +var p9098Var = "thunk from >" +var p9098 = &p9098Var +var p9106Var = "thunk from >" +var p9106 = &p9106Var +var p9131Var = "thunk from >" +var p9131 = &p9131Var +var p9140Var = "thunk from >" +var p9140 = &p9140Var +var p9159Var = "thunk from >" +var p9159 = &p9159Var +var p9186Var = "thunk from >" +var p9186 = &p9186Var +var p9213Var = "thunk from >" var p9213 = &p9213Var -var p9223Var = "thunk from >" -var p9223 = &p9223Var -var p9234Var = "thunk from >>" -var p9234 = &p9234Var -var p9243Var = "function " -var p9243 = &p9243Var -var p9252Var = "thunk from >" -var p9252 = &p9252Var -var p9261Var = "function " -var p9261 = &p9261Var -var p9265Var = "thunk from >" -var p9265 = &p9265Var -var p9269Var = "function " -var p9269 = &p9269Var -var p9330Var = "thunk from >" -var p9330 = &p9330Var -var p9357Var = "thunk from >" -var p9357 = &p9357Var -var p9369Var = "thunk from >" -var p9369 = &p9369Var -var p9390Var = "thunk from >" +var p9240Var = "thunk from >" +var p9240 = &p9240Var +var p9251Var = "thunk from from >>" +var p9251 = &p9251Var +var p9272Var = "thunk from >" +var p9272 = &p9272Var +var p9295Var = "thunk from from >>" +var p9295 = &p9295Var +var p9309Var = "thunk from >" +var p9309 = &p9309Var +var p9325Var = "thunk from from >>" +var p9325 = &p9325Var +var p9336Var = "thunk from from >>" +var p9336 = &p9336Var +var p9347Var = "thunk from >" +var p9347 = &p9347Var +var p9381Var = "thunk from >" +var p9381 = &p9381Var +var p9390Var = "thunk from >" var p9390 = &p9390Var -var p9419Var = "thunk from >" -var p9419 = &p9419Var -var p9426Var = "thunk from >" -var p9426 = &p9426Var -var p9435Var = "thunk from from >>" -var p9435 = &p9435Var -var p9447Var = "thunk from from >>>" -var p9447 = &p9447Var -var p9454Var = "thunk from >" +var p9416Var = "thunk from >" +var p9416 = &p9416Var +var p9425Var = "thunk from >" +var p9425 = &p9425Var +var p9437Var = "thunk from >" +var p9437 = &p9437Var +var p9443Var = "thunk from >>" +var p9443 = &p9443Var +var p9452Var = "object " +var p9452 = &p9452Var +var p9454Var = "object " var p9454 = &p9454Var -var p9464Var = "thunk from >" -var p9464 = &p9464Var -var p9471Var = "thunk from from >>" -var p9471 = &p9471Var -var p9481Var = "thunk from from >>" -var p9481 = &p9481Var -var p9484Var = "thunk from from >>>" -var p9484 = &p9484Var -var p9500Var = "thunk from from >>>" +var p9478Var = "object " +var p9478 = &p9478Var +var p9482Var = "object " +var p9482 = &p9482Var +var p9485Var = "object " +var p9485 = &p9485Var +var p9488Var = 
"object " +var p9488 = &p9488Var +var p9491Var = "object " +var p9491 = &p9491Var +var p9494Var = "object " +var p9494 = &p9494Var +var p9497Var = "object " +var p9497 = &p9497Var +var p9500Var = "object " var p9500 = &p9500Var -var p9504Var = "thunk from from >>>>" -var p9504 = &p9504Var -var p9514Var = "thunk from from >>>>>" -var p9514 = &p9514Var -var p9528Var = "thunk from from >>>>>>" -var p9528 = &p9528Var -var p9538Var = "thunk from from >>" -var p9538 = &p9538Var -var p9555Var = "thunk from >" -var p9555 = &p9555Var -var p9577Var = "thunk from >" -var p9577 = &p9577Var -var p9584Var = "thunk from >" -var p9584 = &p9584Var -var p9591Var = "thunk from from >>" +var p9505Var = "thunk from >" +var p9505 = &p9505Var +var p9507Var = "thunk from >" +var p9507 = &p9507Var +var p9513Var = "function " +var p9513 = &p9513Var +var p9517Var = "thunk from >" +var p9517 = &p9517Var +var p9521Var = "function " +var p9521 = &p9521Var +var p9536Var = "thunk from >" +var p9536 = &p9536Var +var p9546Var = "thunk from >>" +var p9546 = &p9546Var +var p9565Var = "thunk from >" +var p9565 = &p9565Var +var p9582Var = "thunk from >>" +var p9582 = &p9582Var +var p9589Var = "object " +var p9589 = &p9589Var +var p9591Var = "object " var p9591 = &p9591Var -var p9601Var = "thunk from from >>" -var p9601 = &p9601Var -var p9604Var = "thunk from from >>>" -var p9604 = &p9604Var -var p9620Var = "thunk from from >>>" -var p9620 = &p9620Var -var p9624Var = "thunk from from >>>>" -var p9624 = &p9624Var -var p9645Var = "thunk from from >>>>>" -var p9645 = &p9645Var -var p9654Var = "thunk from from >>>>>" +var p9615Var = "object " +var p9615 = &p9615Var +var p9619Var = "object " +var p9619 = &p9619Var +var p9622Var = "object " +var p9622 = &p9622Var +var p9625Var = "object " +var p9625 = &p9625Var +var p9628Var = "object " +var p9628 = &p9628Var +var p9631Var = "object " +var p9631 = &p9631Var +var p9634Var = "object " +var p9634 = &p9634Var +var p9637Var = "object " +var p9637 = &p9637Var +var p9642Var = "thunk from >" +var p9642 = &p9642Var +var p9644Var = "thunk from >" +var p9644 = &p9644Var +var p9650Var = "function " +var p9650 = &p9650Var +var p9654Var = "thunk from >" var p9654 = &p9654Var -var p9668Var = "thunk from from >>>>>>" -var p9668 = &p9668Var -var p9685Var = "thunk from from >>>" -var p9685 = &p9685Var -var p9691Var = "thunk from from >>" -var p9691 = &p9691Var -var p9708Var = "thunk from >" -var p9708 = &p9708Var -var p9718Var = "thunk from >" -var p9718 = &p9718Var -var p9728Var = "function " -var p9728 = &p9728Var -var p9732Var = "thunk from >" -var p9732 = &p9732Var -var p9736Var = "function " -var p9736 = &p9736Var -var p9797Var = "thunk from >" -var p9797 = &p9797Var -var p9824Var = "thunk from >" -var p9824 = &p9824Var -var p9831Var = "thunk from >" -var p9831 = &p9831Var -var p9840Var = "thunk from from >>" -var p9840 = &p9840Var -var p9880Var = "thunk from >" -var p9880 = &p9880Var -var p9889Var = "thunk from from >>" -var p9889 = &p9889Var -var p9901Var = "thunk from >" -var p9901 = &p9901Var -var p9911Var = "thunk from >>" -var p9911 = &p9911Var -var p9934Var = "thunk from >>" +var p9658Var = "function " +var p9658 = &p9658Var +var p9673Var = "thunk from >" +var p9673 = &p9673Var +var p9683Var = "thunk from >" +var p9683 = &p9683Var +var p9693Var = "thunk from >>" +var p9693 = &p9693Var +var p9712Var = "thunk from >" +var p9712 = &p9712Var +var p9723Var = "object " +var p9723 = &p9723Var +var p9725Var = "object " +var p9725 = &p9725Var +var p9749Var = "object " +var p9749 = &p9749Var +var 
p9753Var = "object " +var p9753 = &p9753Var +var p9756Var = "object " +var p9756 = &p9756Var +var p9759Var = "object " +var p9759 = &p9759Var +var p9762Var = "object " +var p9762 = &p9762Var +var p9765Var = "object " +var p9765 = &p9765Var +var p9768Var = "object " +var p9768 = &p9768Var +var p9771Var = "object " +var p9771 = &p9771Var +var p9776Var = "thunk from >" +var p9776 = &p9776Var +var p9778Var = "thunk from >" +var p9778 = &p9778Var +var p9784Var = "function " +var p9784 = &p9784Var +var p9805Var = "thunk from >" +var p9805 = &p9805Var +var p9822Var = "thunk from >" +var p9822 = &p9822Var +var p9845Var = "thunk from >" +var p9845 = &p9845Var +var p9862Var = "thunk from >" +var p9862 = &p9862Var +var p9885Var = "thunk from >" +var p9885 = &p9885Var +var p9902Var = "thunk from >" +var p9902 = &p9902Var +var p9913Var = "thunk from >" +var p9913 = &p9913Var +var p9924Var = "thunk from >>" +var p9924 = &p9924Var +var p9932Var = "object " +var p9932 = &p9932Var +var p9934Var = "object " var p9934 = &p9934Var -var p9947Var = "thunk from >" -var p9947 = &p9947Var -var p9968Var = "thunk from >" +var p9958Var = "object " +var p9958 = &p9958Var +var p9962Var = "object " +var p9962 = &p9962Var +var p9965Var = "object " +var p9965 = &p9965Var +var p9968Var = "object " var p9968 = &p9968Var -var p9997Var = "thunk from >" -var p9997 = &p9997Var -var p10019Var = "thunk from >" -var p10019 = &p10019Var -var p10027Var = "thunk from >" -var p10027 = &p10027Var -var p10036Var = "thunk from from >>" -var p10036 = &p10036Var -var p10048Var = "thunk from from >>>" -var p10048 = &p10048Var -var p10055Var = "thunk from >" -var p10055 = &p10055Var -var p10078Var = "thunk from from >>" -var p10078 = &p10078Var -var p10084Var = "thunk from from >>>" -var p10084 = &p10084Var -var p10100Var = "thunk from from >>>>" -var p10100 = &p10100Var -var p10105Var = "thunk from >" -var p10105 = &p10105Var -var p10131Var = "thunk from >" -var p10131 = &p10131Var -var p10160Var = "thunk from >" -var p10160 = &p10160Var -var p10182Var = "thunk from >" -var p10182 = &p10182Var -var p10190Var = "thunk from >" -var p10190 = &p10190Var -var p10213Var = "thunk from from >>" -var p10213 = &p10213Var -var p10230Var = "thunk from from >>>" -var p10230 = &p10230Var -var p10239Var = "thunk from from >>>" -var p10239 = &p10239Var -var p10255Var = "thunk from from >>>>" -var p10255 = &p10255Var -var p10260Var = "thunk from >" -var p10260 = &p10260Var -var p10269Var = "thunk from from >>" +var p9971Var = "object " +var p9971 = &p9971Var +var p9974Var = "object " +var p9974 = &p9974Var +var p9977Var = "object " +var p9977 = &p9977Var +var p9980Var = "object " +var p9980 = &p9980Var +var p9985Var = "thunk from >" +var p9985 = &p9985Var +var p9987Var = "thunk from >" +var p9987 = &p9987Var +var p9993Var = "function " +var p9993 = &p9993Var +var p10026Var = "object " +var p10026 = &p10026Var +var p10028Var = "object " +var p10028 = &p10028Var +var p10052Var = "object " +var p10052 = &p10052Var +var p10056Var = "object " +var p10056 = &p10056Var +var p10059Var = "object " +var p10059 = &p10059Var +var p10062Var = "object " +var p10062 = &p10062Var +var p10065Var = "object " +var p10065 = &p10065Var +var p10068Var = "object " +var p10068 = &p10068Var +var p10071Var = "object " +var p10071 = &p10071Var +var p10074Var = "object " +var p10074 = &p10074Var +var p10079Var = "thunk from >" +var p10079 = &p10079Var +var p10081Var = "thunk from >" +var p10081 = &p10081Var +var p10087Var = "function " +var p10087 = &p10087Var +var p10108Var = "thunk 
from >" +var p10108 = &p10108Var +var p10125Var = "thunk from >" +var p10125 = &p10125Var +var p10144Var = "object " +var p10144 = &p10144Var +var p10146Var = "object " +var p10146 = &p10146Var +var p10170Var = "object " +var p10170 = &p10170Var +var p10174Var = "object " +var p10174 = &p10174Var +var p10177Var = "object " +var p10177 = &p10177Var +var p10180Var = "object " +var p10180 = &p10180Var +var p10183Var = "object " +var p10183 = &p10183Var +var p10186Var = "object " +var p10186 = &p10186Var +var p10189Var = "object " +var p10189 = &p10189Var +var p10192Var = "object " +var p10192 = &p10192Var +var p10197Var = "thunk from >" +var p10197 = &p10197Var +var p10199Var = "thunk from >" +var p10199 = &p10199Var +var p10205Var = "function " +var p10205 = &p10205Var +var p10226Var = "thunk from >" +var p10226 = &p10226Var +var p10243Var = "thunk from >" +var p10243 = &p10243Var +var p10267Var = "object " +var p10267 = &p10267Var +var p10269Var = "object " var p10269 = &p10269Var -var p10292Var = "thunk from >" -var p10292 = &p10292Var -var p10302Var = "thunk from >" -var p10302 = &p10302Var -var p10314Var = "function " -var p10314 = &p10314Var -var p10335Var = "thunk from >" -var p10335 = &p10335Var -var p10352Var = "thunk from >" -var p10352 = &p10352Var -var p10368Var = "thunk from >" -var p10368 = &p10368Var -var p10384Var = "thunk from >>" -var p10384 = &p10384Var -var p10393Var = "thunk from >>>" -var p10393 = &p10393Var -var p10404Var = "function " -var p10404 = &p10404Var -var p10423Var = "thunk from >" -var p10423 = &p10423Var -var p10452Var = "thunk from from >>" -var p10452 = &p10452Var -var p10457Var = "thunk from from >>>" +var p10293Var = "object " +var p10293 = &p10293Var +var p10297Var = "object " +var p10297 = &p10297Var +var p10300Var = "object " +var p10300 = &p10300Var +var p10303Var = "object " +var p10303 = &p10303Var +var p10306Var = "object " +var p10306 = &p10306Var +var p10309Var = "object " +var p10309 = &p10309Var +var p10312Var = "object " +var p10312 = &p10312Var +var p10315Var = "object " +var p10315 = &p10315Var +var p10320Var = "thunk from >" +var p10320 = &p10320Var +var p10322Var = "thunk from >" +var p10322 = &p10322Var +var p10328Var = "function " +var p10328 = &p10328Var +var p10349Var = "thunk from >" +var p10349 = &p10349Var +var p10366Var = "thunk from >" +var p10366 = &p10366Var +var p10389Var = "thunk from >" +var p10389 = &p10389Var +var p10406Var = "thunk from >" +var p10406 = &p10406Var +var p10424Var = "object " +var p10424 = &p10424Var +var p10426Var = "object " +var p10426 = &p10426Var +var p10450Var = "object " +var p10450 = &p10450Var +var p10454Var = "object " +var p10454 = &p10454Var +var p10457Var = "object " var p10457 = &p10457Var -var p10466Var = "thunk from from >>>>" +var p10460Var = "object " +var p10460 = &p10460Var +var p10463Var = "object " +var p10463 = &p10463Var +var p10466Var = "object " var p10466 = &p10466Var -var p10477Var = "thunk from from >>>>" +var p10469Var = "object " +var p10469 = &p10469Var +var p10472Var = "object " +var p10472 = &p10472Var +var p10477Var = "thunk from >" var p10477 = &p10477Var -var p10484Var = "thunk from >" -var p10484 = &p10484Var -var p10493Var = "thunk from from >>" -var p10493 = &p10493Var -var p10508Var = "thunk from >" -var p10508 = &p10508Var -var p10517Var = "thunk from >>" -var p10517 = &p10517Var -var p10539Var = "thunk from >" -var p10539 = &p10539Var -var p10555Var = "thunk from >" -var p10555 = &p10555Var -var p10564Var = "thunk from >>" -var p10564 = &p10564Var -var p10580Var = 
"thunk from >>>" -var p10580 = &p10580Var -var p10589Var = "thunk from >>>>" -var p10589 = &p10589Var -var p10612Var = "thunk from >" -var p10612 = &p10612Var -var p10628Var = "thunk from >" -var p10628 = &p10628Var -var p10637Var = "thunk from >>" -var p10637 = &p10637Var -var p10658Var = "thunk from >" -var p10658 = &p10658Var -var p10682Var = "thunk from >" -var p10682 = &p10682Var -var p10694Var = "thunk from >" -var p10694 = &p10694Var -var p10745Var = "function " +var p10479Var = "thunk from >" +var p10479 = &p10479Var +var p10485Var = "function " +var p10485 = &p10485Var +var p10506Var = "thunk from >" +var p10506 = &p10506Var +var p10523Var = "thunk from >" +var p10523 = &p10523Var +var p10546Var = "thunk from >" +var p10546 = &p10546Var +var p10563Var = "thunk from >" +var p10563 = &p10563Var +var p10581Var = "object " +var p10581 = &p10581Var +var p10583Var = "object " +var p10583 = &p10583Var +var p10607Var = "object " +var p10607 = &p10607Var +var p10611Var = "object " +var p10611 = &p10611Var +var p10614Var = "object " +var p10614 = &p10614Var +var p10617Var = "object " +var p10617 = &p10617Var +var p10620Var = "object " +var p10620 = &p10620Var +var p10623Var = "object " +var p10623 = &p10623Var +var p10626Var = "object " +var p10626 = &p10626Var +var p10629Var = "object " +var p10629 = &p10629Var +var p10634Var = "thunk from >" +var p10634 = &p10634Var +var p10636Var = "thunk from >" +var p10636 = &p10636Var +var p10642Var = "function " +var p10642 = &p10642Var +var p10651Var = "thunk from >" +var p10651 = &p10651Var +var p10654Var = "function " +var p10654 = &p10654Var +var p10667Var = "object " +var p10667 = &p10667Var +var p10669Var = "object " +var p10669 = &p10669Var +var p10693Var = "object " +var p10693 = &p10693Var +var p10697Var = "object " +var p10697 = &p10697Var +var p10700Var = "object " +var p10700 = &p10700Var +var p10703Var = "object " +var p10703 = &p10703Var +var p10706Var = "object " +var p10706 = &p10706Var +var p10709Var = "object " +var p10709 = &p10709Var +var p10712Var = "object " +var p10712 = &p10712Var +var p10715Var = "object " +var p10715 = &p10715Var +var p10720Var = "thunk from >" +var p10720 = &p10720Var +var p10722Var = "thunk from >" +var p10722 = &p10722Var +var p10728Var = "function " +var p10728 = &p10728Var +var p10732Var = "thunk from >" +var p10732 = &p10732Var +var p10736Var = "function " +var p10736 = &p10736Var +var p10745Var = "thunk from >" var p10745 = &p10745Var -var p10771Var = "thunk from from >>" -var p10771 = &p10771Var -var p10776Var = "thunk from from >>>" -var p10776 = &p10776Var -var p10787Var = "thunk from from >>>>" -var p10787 = &p10787Var -var p10794Var = "thunk from >" -var p10794 = &p10794Var -var p10803Var = "thunk from from >>" -var p10803 = &p10803Var -var p10814Var = "thunk from >" -var p10814 = &p10814Var -var p10822Var = "thunk from >>" -var p10822 = &p10822Var -var p10829Var = "function " -var p10829 = &p10829Var -var p10842Var = "thunk from >" -var p10842 = &p10842Var -var p10864Var = "thunk from >" -var p10864 = &p10864Var -var p10870Var = "thunk from >" -var p10870 = &p10870Var -var p10874Var = "function " -var p10874 = &p10874Var -var p10885Var = "thunk from >" -var p10885 = &p10885Var -var p10893Var = "thunk from >" -var p10893 = &p10893Var -var p10902Var = "thunk from >" -var p10902 = &p10902Var -var p10915Var = "thunk from from >>" -var p10915 = &p10915Var -var p10935Var = "thunk from from >>" -var p10935 = &p10935Var -var p10945Var = "thunk from >" -var p10945 = &p10945Var -var p10959Var = "thunk 
from >" -var p10959 = &p10959Var -var p10993Var = "thunk from >" -var p10993 = &p10993Var -var p11002Var = "thunk from from >>" -var p11002 = &p11002Var -var p11026Var = "thunk from from >>>" -var p11026 = &p11026Var -var p11031Var = "thunk from from >>>>" +var p10761Var = "thunk from >>" +var p10761 = &p10761Var +var p10765Var = "thunk from >>>" +var p10765 = &p10765Var +var p10790Var = "thunk from >>>" +var p10790 = &p10790Var +var p10816Var = "thunk from >>>" +var p10816 = &p10816Var +var p10821Var = "thunk from >>>>" +var p10821 = &p10821Var +var p10839Var = "thunk from >>>" +var p10839 = &p10839Var +var p10844Var = "thunk from >>>>" +var p10844 = &p10844Var +var p10857Var = "thunk from >>" +var p10857 = &p10857Var +var p10863Var = "thunk from >" +var p10863 = &p10863Var +var p10867Var = "function " +var p10867 = &p10867Var +var p10881Var = "thunk from >" +var p10881 = &p10881Var +var p10886Var = "thunk from >>" +var p10886 = &p10886Var +var p10894Var = "thunk from >" +var p10894 = &p10894Var +var p10897Var = "thunk from >" +var p10897 = &p10897Var +var p10908Var = "thunk from from >>" +var p10908 = &p10908Var +var p10917Var = "thunk from from >>" +var p10917 = &p10917Var +var p10938Var = "thunk from from >>" +var p10938 = &p10938Var +var p10944Var = "thunk from from >>>" +var p10944 = &p10944Var +var p10956Var = "thunk from >" +var p10956 = &p10956Var +var p10965Var = "thunk from from >>" +var p10965 = &p10965Var +var p10979Var = "thunk from >" +var p10979 = &p10979Var +var p10995Var = "thunk from >>" +var p10995 = &p10995Var +var p11000Var = "thunk from >>" +var p11000 = &p11000Var +var p11005Var = "object " +var p11005 = &p11005Var +var p11007Var = "object " +var p11007 = &p11007Var +var p11031Var = "object " var p11031 = &p11031Var -var p11048Var = "thunk from from >>>" -var p11048 = &p11048Var -var p11059Var = "thunk from >" -var p11059 = &p11059Var -var p11063Var = "thunk from >>" -var p11063 = &p11063Var -var p11084Var = "thunk from >>>" -var p11084 = &p11084Var -var p11090Var = "thunk from >>>>" -var p11090 = &p11090Var -var p11104Var = "thunk from >" -var p11104 = &p11104Var -var p11112Var = "function " -var p11112 = &p11112Var -var p11116Var = "thunk from >" -var p11116 = &p11116Var -var p11135Var = "thunk from from >>" -var p11135 = &p11135Var -var p11147Var = "thunk from from >>" -var p11147 = &p11147Var -var p11151Var = "function " -var p11151 = &p11151Var -var p11160Var = "thunk from >" -var p11160 = &p11160Var -var p11170Var = "thunk from >" -var p11170 = &p11170Var -var p11174Var = "function " -var p11174 = &p11174Var -var p11189Var = "thunk from >" -var p11189 = &p11189Var -var p11211Var = "thunk from >" -var p11211 = &p11211Var -var p11217Var = "thunk from >" -var p11217 = &p11217Var -var p11260Var = "thunk from >" -var p11260 = &p11260Var -var p11291Var = "thunk from >" +var p11035Var = "object " +var p11035 = &p11035Var +var p11038Var = "object " +var p11038 = &p11038Var +var p11041Var = "object " +var p11041 = &p11041Var +var p11044Var = "object " +var p11044 = &p11044Var +var p11047Var = "object " +var p11047 = &p11047Var +var p11050Var = "object " +var p11050 = &p11050Var +var p11053Var = "object " +var p11053 = &p11053Var +var p11058Var = "thunk from >" +var p11058 = &p11058Var +var p11060Var = "thunk from >" +var p11060 = &p11060Var +var p11066Var = "function " +var p11066 = &p11066Var +var p11070Var = "thunk from >" +var p11070 = &p11070Var +var p11079Var = "thunk from from >>" +var p11079 = &p11079Var +var p11085Var = "thunk from >" +var p11085 = &p11085Var 
+var p11089Var = "function " +var p11089 = &p11089Var +var p11191Var = "thunk from >" +var p11191 = &p11191Var +var p11200Var = "thunk from from >>" +var p11200 = &p11200Var +var p11236Var = "thunk from >" +var p11236 = &p11236Var +var p11258Var = "thunk from >" +var p11258 = &p11258Var +var p11274Var = "thunk from >>" +var p11274 = &p11274Var +var p11280Var = "thunk from >>>" +var p11280 = &p11280Var +var p11291Var = "thunk from >>" var p11291 = &p11291Var -var p11297Var = "thunk from >" +var p11297Var = "object " var p11297 = &p11297Var -var p11378Var = "thunk from >" -var p11378 = &p11378Var -var p11395Var = "thunk from >" -var p11395 = &p11395Var -var p11508Var = "thunk from >" -var p11508 = &p11508Var -var p11525Var = "thunk from >" -var p11525 = &p11525Var -var p11534Var = "thunk from from >>" -var p11534 = &p11534Var -var p11537Var = "function " -var p11537 = &p11537Var -var p11563Var = "thunk from >" +var p11299Var = "object " +var p11299 = &p11299Var +var p11323Var = "object " +var p11323 = &p11323Var +var p11327Var = "object " +var p11327 = &p11327Var +var p11330Var = "object " +var p11330 = &p11330Var +var p11333Var = "object " +var p11333 = &p11333Var +var p11336Var = "object " +var p11336 = &p11336Var +var p11339Var = "object " +var p11339 = &p11339Var +var p11342Var = "object " +var p11342 = &p11342Var +var p11345Var = "object " +var p11345 = &p11345Var +var p11350Var = "thunk from >" +var p11350 = &p11350Var +var p11352Var = "thunk from >" +var p11352 = &p11352Var +var p11358Var = "function " +var p11358 = &p11358Var +var p11367Var = "thunk from >" +var p11367 = &p11367Var +var p11373Var = "object " +var p11373 = &p11373Var +var p11375Var = "object " +var p11375 = &p11375Var +var p11399Var = "object " +var p11399 = &p11399Var +var p11403Var = "object " +var p11403 = &p11403Var +var p11406Var = "object " +var p11406 = &p11406Var +var p11409Var = "object " +var p11409 = &p11409Var +var p11412Var = "object " +var p11412 = &p11412Var +var p11415Var = "object " +var p11415 = &p11415Var +var p11418Var = "object " +var p11418 = &p11418Var +var p11421Var = "object " +var p11421 = &p11421Var +var p11426Var = "thunk from >" +var p11426 = &p11426Var +var p11428Var = "thunk from >" +var p11428 = &p11428Var +var p11434Var = "function " +var p11434 = &p11434Var +var p11438Var = "thunk from >" +var p11438 = &p11438Var +var p11447Var = "thunk from from >>" +var p11447 = &p11447Var +var p11453Var = "thunk from >" +var p11453 = &p11453Var +var p11457Var = "function " +var p11457 = &p11457Var +var p11491Var = "thunk from >" +var p11491 = &p11491Var +var p11507Var = "thunk from >>" +var p11507 = &p11507Var +var p11513Var = "thunk from >>>" +var p11513 = &p11513Var +var p11524Var = "thunk from >>" +var p11524 = &p11524Var +var p11530Var = "object " +var p11530 = &p11530Var +var p11532Var = "object " +var p11532 = &p11532Var +var p11556Var = "object " +var p11556 = &p11556Var +var p11560Var = "object " +var p11560 = &p11560Var +var p11563Var = "object " var p11563 = &p11563Var -var p11573Var = "function " -var p11573 = &p11573Var -var p11602Var = "thunk from >" -var p11602 = &p11602Var -var p11623Var = "thunk from >" -var p11623 = &p11623Var -var p11627Var = "function " -var p11627 = &p11627Var -var p11642Var = "thunk from >" -var p11642 = &p11642Var -var p11650Var = "thunk from >" -var p11650 = &p11650Var -var p11654Var = "thunk from from >>" -var p11654 = &p11654Var -var p11689Var = "thunk from >" -var p11689 = &p11689Var -var p11714Var = "thunk from from >>" -var p11714 = &p11714Var -var 
p11755Var = "thunk from >" -var p11755 = &p11755Var -var p11780Var = "thunk from from >>" -var p11780 = &p11780Var -var p11820Var = "thunk from >" -var p11820 = &p11820Var -var p11847Var = "thunk from >" -var p11847 = &p11847Var -var p11857Var = "function " -var p11857 = &p11857Var -var p11861Var = "thunk from >" -var p11861 = &p11861Var -var p11870Var = "thunk from from >>" -var p11870 = &p11870Var -var p11881Var = "thunk from >" -var p11881 = &p11881Var -var p11891Var = "thunk from >>" -var p11891 = &p11891Var -var p11895Var = "function " -var p11895 = &p11895Var -var p11904Var = "thunk from >" -var p11904 = &p11904Var -var p11914Var = "function " -var p11914 = &p11914Var -var p11918Var = "thunk from >" -var p11918 = &p11918Var -var p11927Var = "thunk from from >>" -var p11927 = &p11927Var -var p11948Var = "thunk from >" -var p11948 = &p11948Var -var p11956Var = "thunk from >" -var p11956 = &p11956Var -var p11965Var = "thunk from >" -var p11965 = &p11965Var -var p11974Var = "thunk from from >>" +var p11566Var = "object " +var p11566 = &p11566Var +var p11569Var = "object " +var p11569 = &p11569Var +var p11572Var = "object " +var p11572 = &p11572Var +var p11575Var = "object " +var p11575 = &p11575Var +var p11578Var = "object " +var p11578 = &p11578Var +var p11583Var = "thunk from >" +var p11583 = &p11583Var +var p11585Var = "thunk from >" +var p11585 = &p11585Var +var p11591Var = "function " +var p11591 = &p11591Var +var p11595Var = "thunk from >" +var p11595 = &p11595Var +var p11604Var = "thunk from from >>" +var p11604 = &p11604Var +var p11610Var = "thunk from >" +var p11610 = &p11610Var +var p11614Var = "function " +var p11614 = &p11614Var +var p11639Var = "thunk from >" +var p11639 = &p11639Var +var p11643Var = "function " +var p11643 = &p11643Var +var p11653Var = "thunk from >" +var p11653 = &p11653Var +var p11664Var = "thunk from >>" +var p11664 = &p11664Var +var p11671Var = "object " +var p11671 = &p11671Var +var p11673Var = "object " +var p11673 = &p11673Var +var p11697Var = "object " +var p11697 = &p11697Var +var p11701Var = "object " +var p11701 = &p11701Var +var p11704Var = "object " +var p11704 = &p11704Var +var p11707Var = "object " +var p11707 = &p11707Var +var p11710Var = "object " +var p11710 = &p11710Var +var p11713Var = "object " +var p11713 = &p11713Var +var p11716Var = "object " +var p11716 = &p11716Var +var p11719Var = "object " +var p11719 = &p11719Var +var p11724Var = "thunk from >" +var p11724 = &p11724Var +var p11726Var = "thunk from >" +var p11726 = &p11726Var +var p11732Var = "function " +var p11732 = &p11732Var +var p11741Var = "thunk from >" +var p11741 = &p11741Var +var p11748Var = "object " +var p11748 = &p11748Var +var p11750Var = "object " +var p11750 = &p11750Var +var p11774Var = "object " +var p11774 = &p11774Var +var p11778Var = "object " +var p11778 = &p11778Var +var p11781Var = "object " +var p11781 = &p11781Var +var p11784Var = "object " +var p11784 = &p11784Var +var p11787Var = "object " +var p11787 = &p11787Var +var p11790Var = "object " +var p11790 = &p11790Var +var p11793Var = "object " +var p11793 = &p11793Var +var p11796Var = "object " +var p11796 = &p11796Var +var p11801Var = "thunk from >" +var p11801 = &p11801Var +var p11803Var = "thunk from >" +var p11803 = &p11803Var +var p11809Var = "function " +var p11809 = &p11809Var +var p11813Var = "thunk from >" +var p11813 = &p11813Var +var p11817Var = "function " +var p11817 = &p11817Var +var p11878Var = "thunk from >" +var p11878 = &p11878Var +var p11905Var = "thunk from >" +var p11905 = &p11905Var 
+var p11917Var = "thunk from >" +var p11917 = &p11917Var +var p11938Var = "thunk from >" +var p11938 = &p11938Var +var p11967Var = "thunk from >" +var p11967 = &p11967Var +var p11974Var = "thunk from >" var p11974 = &p11974Var -var p11983Var = "function " +var p11983Var = "thunk from from >>" var p11983 = &p11983Var -var p11996Var = "thunk from >" -var p11996 = &p11996Var -var p12005Var = "thunk from from >>" -var p12005 = &p12005Var -var p12009Var = "function " -var p12009 = &p12009Var -var p12021Var = "thunk from >" -var p12021 = &p12021Var -var p12030Var = "thunk from from >>" -var p12030 = &p12030Var -var p12034Var = "function " -var p12034 = &p12034Var -var p12055Var = "thunk from >" -var p12055 = &p12055Var -var p12061Var = "thunk from >" -var p12061 = &p12061Var -var p12072Var = "thunk from >" -var p12072 = &p12072Var -var p12080Var = "function " -var p12080 = &p12080Var -var p12084Var = "thunk from >" -var p12084 = &p12084Var -var p12088Var = "function " -var p12088 = &p12088Var -var p12107Var = "thunk from >" -var p12107 = &p12107Var -var p12114Var = "thunk from >" -var p12114 = &p12114Var -var p12141Var = "thunk from >" -var p12141 = &p12141Var -var p12156Var = "thunk from >" -var p12156 = &p12156Var -var p12167Var = "thunk from >" -var p12167 = &p12167Var -var p12178Var = "function " -var p12178 = &p12178Var -var p12187Var = "thunk from >" -var p12187 = &p12187Var -var p12196Var = "thunk from >>" -var p12196 = &p12196Var -var p12204Var = "function " -var p12204 = &p12204Var -var p12215Var = "thunk from >" -var p12215 = &p12215Var -var p12224Var = "thunk from >>" -var p12224 = &p12224Var -var p12228Var = "thunk from >>>" -var p12228 = &p12228Var -var p12239Var = "function " +var p11995Var = "thunk from from >>>" +var p11995 = &p11995Var +var p12002Var = "thunk from >" +var p12002 = &p12002Var +var p12012Var = "thunk from >" +var p12012 = &p12012Var +var p12019Var = "thunk from from >>" +var p12019 = &p12019Var +var p12029Var = "thunk from from >>" +var p12029 = &p12029Var +var p12032Var = "thunk from from >>>" +var p12032 = &p12032Var +var p12048Var = "thunk from from >>>" +var p12048 = &p12048Var +var p12052Var = "thunk from from >>>>" +var p12052 = &p12052Var +var p12062Var = "thunk from from >>>>>" +var p12062 = &p12062Var +var p12076Var = "thunk from from >>>>>>" +var p12076 = &p12076Var +var p12086Var = "thunk from from >>" +var p12086 = &p12086Var +var p12103Var = "thunk from >" +var p12103 = &p12103Var +var p12125Var = "thunk from >" +var p12125 = &p12125Var +var p12132Var = "thunk from >" +var p12132 = &p12132Var +var p12139Var = "thunk from from >>" +var p12139 = &p12139Var +var p12149Var = "thunk from from >>" +var p12149 = &p12149Var +var p12152Var = "thunk from from >>>" +var p12152 = &p12152Var +var p12168Var = "thunk from from >>>" +var p12168 = &p12168Var +var p12172Var = "thunk from from >>>>" +var p12172 = &p12172Var +var p12193Var = "thunk from from >>>>>" +var p12193 = &p12193Var +var p12202Var = "thunk from from >>>>>" +var p12202 = &p12202Var +var p12216Var = "thunk from from >>>>>>" +var p12216 = &p12216Var +var p12233Var = "thunk from from >>>" +var p12233 = &p12233Var +var p12239Var = "thunk from from >>" var p12239 = &p12239Var -var p12248Var = "thunk from >" -var p12248 = &p12248Var -var p12260Var = "function " -var p12260 = &p12260Var -var p12264Var = "thunk from >" -var p12264 = &p12264Var -var p12268Var = "function " -var p12268 = &p12268Var -var p12285Var = "thunk from >" -var p12285 = &p12285Var -var p12300Var = "thunk from >" +var p12256Var = 
"thunk from >" +var p12256 = &p12256Var +var p12266Var = "thunk from >" +var p12266 = &p12266Var +var p12274Var = "object " +var p12274 = &p12274Var +var p12276Var = "object " +var p12276 = &p12276Var +var p12300Var = "object " var p12300 = &p12300Var -var p12332Var = "thunk from >" -var p12332 = &p12332Var -var p12354Var = "thunk from >>" -var p12354 = &p12354Var -var p12382Var = "thunk from >" -var p12382 = &p12382Var -var p12401Var = "thunk from >" -var p12401 = &p12401Var -var p12420Var = "thunk from >" -var p12420 = &p12420Var -var p12433Var = "function " -var p12433 = &p12433Var -var p12437Var = "thunk from >" -var p12437 = &p12437Var -var p12441Var = "function " -var p12441 = &p12441Var -var p12456Var = "thunk from >" -var p12456 = &p12456Var -var p12475Var = "thunk from >" -var p12475 = &p12475Var -var p12483Var = "thunk from >" -var p12483 = &p12483Var -var p12502Var = "thunk from >>" -var p12502 = &p12502Var -var p12536Var = "thunk from >" -var p12536 = &p12536Var -var p12574Var = "thunk from >" -var p12574 = &p12574Var -var p12593Var = "thunk from >>" -var p12593 = &p12593Var -var p12605Var = "thunk from >" -var p12605 = &p12605Var -var p12624Var = "thunk from >" -var p12624 = &p12624Var -var p12637Var = "function " -var p12637 = &p12637Var -var p12656Var = "thunk from >" -var p12656 = &p12656Var -var p12663Var = "thunk from >" -var p12663 = &p12663Var -var p12682Var = "thunk from from >>" -var p12682 = &p12682Var -var p12692Var = "thunk from >" -var p12692 = &p12692Var -var p12711Var = "thunk from from >>" -var p12711 = &p12711Var -var p12723Var = "thunk from from >>" -var p12723 = &p12723Var -var p12751Var = "thunk from >" +var p12304Var = "object " +var p12304 = &p12304Var +var p12307Var = "object " +var p12307 = &p12307Var +var p12310Var = "object " +var p12310 = &p12310Var +var p12313Var = "object " +var p12313 = &p12313Var +var p12316Var = "object " +var p12316 = &p12316Var +var p12319Var = "object " +var p12319 = &p12319Var +var p12322Var = "object " +var p12322 = &p12322Var +var p12327Var = "thunk from >" +var p12327 = &p12327Var +var p12329Var = "thunk from >" +var p12329 = &p12329Var +var p12335Var = "function " +var p12335 = &p12335Var +var p12339Var = "thunk from >" +var p12339 = &p12339Var +var p12343Var = "function " +var p12343 = &p12343Var +var p12404Var = "thunk from >" +var p12404 = &p12404Var +var p12431Var = "thunk from >" +var p12431 = &p12431Var +var p12438Var = "thunk from >" +var p12438 = &p12438Var +var p12447Var = "thunk from from >>" +var p12447 = &p12447Var +var p12487Var = "thunk from >" +var p12487 = &p12487Var +var p12496Var = "thunk from from >>" +var p12496 = &p12496Var +var p12508Var = "thunk from >" +var p12508 = &p12508Var +var p12518Var = "thunk from >>" +var p12518 = &p12518Var +var p12541Var = "thunk from >>" +var p12541 = &p12541Var +var p12554Var = "thunk from >" +var p12554 = &p12554Var +var p12575Var = "thunk from >" +var p12575 = &p12575Var +var p12604Var = "thunk from >" +var p12604 = &p12604Var +var p12626Var = "thunk from >" +var p12626 = &p12626Var +var p12634Var = "thunk from >" +var p12634 = &p12634Var +var p12643Var = "thunk from from >>" +var p12643 = &p12643Var +var p12655Var = "thunk from from >>>" +var p12655 = &p12655Var +var p12662Var = "thunk from >" +var p12662 = &p12662Var +var p12699Var = "thunk from from >>" +var p12699 = &p12699Var +var p12705Var = "thunk from from >>>" +var p12705 = &p12705Var +var p12720Var = "thunk from from >>>>" +var p12720 = &p12720Var +var p12725Var = "thunk from >" +var p12725 = &p12725Var 
+var p12751Var = "thunk from >" var p12751 = &p12751Var -var p12762Var = "thunk from from >>" -var p12762 = &p12762Var -var p12774Var = "thunk from from >>" -var p12774 = &p12774Var -var p12780Var = "thunk from >" +var p12780Var = "thunk from >" var p12780 = &p12780Var -var p12789Var = "thunk from from >>" -var p12789 = &p12789Var -var p12800Var = "thunk from from >>>" -var p12800 = &p12800Var -var p12830Var = "object " -var p12830 = &p12830Var -var p12843Var = "thunk from >" -var p12843 = &p12843Var -var p12866Var = "thunk from >" -var p12866 = &p12866Var -var p12879Var = "thunk from >" -var p12879 = &p12879Var -var p12895Var = "thunk from >" -var p12895 = &p12895Var -var p12916Var = "thunk from >" -var p12916 = &p12916Var -var p12928Var = "function " -var p12928 = &p12928Var -var p12937Var = "thunk from >" -var p12937 = &p12937Var -var p12946Var = "function " -var p12946 = &p12946Var -var p12955Var = "thunk from >" -var p12955 = &p12955Var -var p12964Var = "function " -var p12964 = &p12964Var -var p12973Var = "thunk from >" -var p12973 = &p12973Var -var p12984Var = "function " -var p12984 = &p12984Var -var p12993Var = "thunk from >" -var p12993 = &p12993Var -var p13004Var = "function " -var p13004 = &p13004Var -var p13008Var = "thunk from >" -var p13008 = &p13008Var -var p13017Var = "thunk from from >>" -var p13017 = &p13017Var -var p13023Var = "thunk from >" -var p13023 = &p13023Var -var p13032Var = "thunk from from >>" -var p13032 = &p13032Var -var p13047Var = "thunk from >" -var p13047 = &p13047Var -var p13063Var = "thunk from >" -var p13063 = &p13063Var -var p13070Var = "thunk from >" +var p12802Var = "thunk from >" +var p12802 = &p12802Var +var p12810Var = "thunk from >" +var p12810 = &p12810Var +var p12833Var = "thunk from from >>" +var p12833 = &p12833Var +var p12846Var = "thunk from from >>>" +var p12846 = &p12846Var +var p12855Var = "thunk from from >>>" +var p12855 = &p12855Var +var p12870Var = "thunk from from >>>>" +var p12870 = &p12870Var +var p12875Var = "thunk from >" +var p12875 = &p12875Var +var p12884Var = "thunk from from >>" +var p12884 = &p12884Var +var p12907Var = "thunk from >" +var p12907 = &p12907Var +var p12921Var = "thunk from >" +var p12921 = &p12921Var +var p12930Var = "object " +var p12930 = &p12930Var +var p12932Var = "object " +var p12932 = &p12932Var +var p12956Var = "object " +var p12956 = &p12956Var +var p12960Var = "object " +var p12960 = &p12960Var +var p12963Var = "object " +var p12963 = &p12963Var +var p12966Var = "object " +var p12966 = &p12966Var +var p12969Var = "object " +var p12969 = &p12969Var +var p12972Var = "object " +var p12972 = &p12972Var +var p12975Var = "object " +var p12975 = &p12975Var +var p12978Var = "object " +var p12978 = &p12978Var +var p12983Var = "thunk from >" +var p12983 = &p12983Var +var p12985Var = "thunk from >" +var p12985 = &p12985Var +var p12991Var = "function " +var p12991 = &p12991Var +var p13012Var = "thunk from >" +var p13012 = &p13012Var +var p13029Var = "thunk from >" +var p13029 = &p13029Var +var p13045Var = "thunk from >" +var p13045 = &p13045Var +var p13061Var = "thunk from >>" +var p13061 = &p13061Var +var p13070Var = "thunk from >>>" var p13070 = &p13070Var -var p13079Var = "thunk from from >>" +var p13079Var = "object " var p13079 = &p13079Var -var p13094Var = "thunk from >" -var p13094 = &p13094Var -var p13105Var = "thunk from >>" +var p13081Var = "object " +var p13081 = &p13081Var +var p13105Var = "object " var p13105 = &p13105Var -var p13112Var = "thunk from >" +var p13109Var = "object " +var p13109 = 
&p13109Var +var p13112Var = "object " var p13112 = &p13112Var -var p13116Var = "function " -var p13116 = &p13116Var -var p13156Var = "thunk from >" -var p13156 = &p13156Var -var p13171Var = "thunk from >" -var p13171 = &p13171Var -var p13187Var = "thunk from >" -var p13187 = &p13187Var -var p13194Var = "thunk from >" -var p13194 = &p13194Var -var p13203Var = "thunk from from >>" -var p13203 = &p13203Var -var p13209Var = "thunk from >" -var p13209 = &p13209Var -var p13218Var = "thunk from from >>" -var p13218 = &p13218Var -var p13243Var = "thunk from >" -var p13243 = &p13243Var -var p13250Var = "thunk from >" -var p13250 = &p13250Var -var p13254Var = "function " -var p13254 = &p13254Var -var p13269Var = "thunk from >" -var p13269 = &p13269Var -var p13304Var = "thunk from >" -var p13304 = &p13304Var -var p13319Var = "thunk from >" -var p13319 = &p13319Var -var p13333Var = "thunk from >" -var p13333 = &p13333Var -var p13343Var = "function " -var p13343 = &p13343Var -var p13347Var = "thunk from >" -var p13347 = &p13347Var -var p13356Var = "thunk from from >>" -var p13356 = &p13356Var -var p13368Var = "thunk from >" -var p13368 = &p13368Var -var p13380Var = "thunk from >>" -var p13380 = &p13380Var -var p13391Var = "thunk from >>>" -var p13391 = &p13391Var -var p13398Var = "function " -var p13398 = &p13398Var -var p13408Var = "thunk from >>" -var p13408 = &p13408Var -var p13416Var = "function " -var p13416 = &p13416Var -var p13420Var = "thunk from >" -var p13420 = &p13420Var -var p13424Var = "function " -var p13424 = &p13424Var -var p13428Var = "thunk from >" -var p13428 = &p13428Var -var p13437Var = "thunk from from >>" -var p13437 = &p13437Var -var p13477Var = "thunk from >" -var p13477 = &p13477Var -var p13504Var = "thunk from >" -var p13504 = &p13504Var -var p13512Var = "thunk from >" +var p13115Var = "object " +var p13115 = &p13115Var +var p13118Var = "object " +var p13118 = &p13118Var +var p13121Var = "object " +var p13121 = &p13121Var +var p13124Var = "object " +var p13124 = &p13124Var +var p13127Var = "object " +var p13127 = &p13127Var +var p13132Var = "thunk from >" +var p13132 = &p13132Var +var p13134Var = "thunk from >" +var p13134 = &p13134Var +var p13140Var = "function " +var p13140 = &p13140Var +var p13159Var = "thunk from >" +var p13159 = &p13159Var +var p13188Var = "thunk from from >>" +var p13188 = &p13188Var +var p13193Var = "thunk from from >>>" +var p13193 = &p13193Var +var p13202Var = "thunk from from >>>>" +var p13202 = &p13202Var +var p13213Var = "thunk from from >>>>" +var p13213 = &p13213Var +var p13220Var = "thunk from >" +var p13220 = &p13220Var +var p13229Var = "thunk from from >>" +var p13229 = &p13229Var +var p13244Var = "thunk from >" +var p13244 = &p13244Var +var p13253Var = "thunk from >>" +var p13253 = &p13253Var +var p13275Var = "thunk from >" +var p13275 = &p13275Var +var p13291Var = "thunk from >" +var p13291 = &p13291Var +var p13300Var = "thunk from >>" +var p13300 = &p13300Var +var p13316Var = "thunk from >>>" +var p13316 = &p13316Var +var p13325Var = "thunk from >>>>" +var p13325 = &p13325Var +var p13348Var = "thunk from >" +var p13348 = &p13348Var +var p13364Var = "thunk from >" +var p13364 = &p13364Var +var p13373Var = "thunk from >>" +var p13373 = &p13373Var +var p13394Var = "thunk from >" +var p13394 = &p13394Var +var p13418Var = "thunk from >" +var p13418 = &p13418Var +var p13430Var = "thunk from >" +var p13430 = &p13430Var +var p13479Var = "object " +var p13479 = &p13479Var +var p13481Var = "object " +var p13481 = &p13481Var +var p13505Var = "object 
" +var p13505 = &p13505Var +var p13509Var = "object " +var p13509 = &p13509Var +var p13512Var = "object " var p13512 = &p13512Var -var p13521Var = "thunk from from >>" +var p13515Var = "object " +var p13515 = &p13515Var +var p13518Var = "object " +var p13518 = &p13518Var +var p13521Var = "object " var p13521 = &p13521Var -var p13555Var = "thunk from >" -var p13555 = &p13555Var -var p13564Var = "thunk from >>" -var p13564 = &p13564Var -var p13570Var = "thunk from >" -var p13570 = &p13570Var -var p13579Var = "thunk from >>" -var p13579 = &p13579Var -var p13624Var = "thunk from >" +var p13524Var = "object " +var p13524 = &p13524Var +var p13527Var = "object " +var p13527 = &p13527Var +var p13532Var = "thunk from >" +var p13532 = &p13532Var +var p13534Var = "thunk from >" +var p13534 = &p13534Var +var p13540Var = "function " +var p13540 = &p13540Var +var p13566Var = "thunk from from >>" +var p13566 = &p13566Var +var p13571Var = "thunk from from >>>" +var p13571 = &p13571Var +var p13582Var = "thunk from from >>>>" +var p13582 = &p13582Var +var p13589Var = "thunk from >" +var p13589 = &p13589Var +var p13598Var = "thunk from from >>" +var p13598 = &p13598Var +var p13609Var = "thunk from >" +var p13609 = &p13609Var +var p13617Var = "thunk from >>" +var p13617 = &p13617Var +var p13622Var = "object " +var p13622 = &p13622Var +var p13624Var = "object " var p13624 = &p13624Var -var p13633Var = "thunk from >>" -var p13633 = &p13633Var var p13648Var = "object " var p13648 = &p13648Var -var p13657Var = "thunk from >" -var p13657 = &p13657Var -var p13673Var = "thunk from >" -var p13673 = &p13673Var +var p13652Var = "object " +var p13652 = &p13652Var +var p13655Var = "object " +var p13655 = &p13655Var +var p13658Var = "object " +var p13658 = &p13658Var +var p13661Var = "object " +var p13661 = &p13661Var +var p13664Var = "object " +var p13664 = &p13664Var +var p13667Var = "object " +var p13667 = &p13667Var +var p13670Var = "object " +var p13670 = &p13670Var +var p13675Var = "thunk from >" +var p13675 = &p13675Var +var p13677Var = "thunk from >" +var p13677 = &p13677Var +var p13683Var = "function " +var p13683 = &p13683Var +var p13696Var = "thunk from >" +var p13696 = &p13696Var +var p13718Var = "thunk from >" +var p13718 = &p13718Var +var p13724Var = "thunk from >" +var p13724 = &p13724Var +var p13728Var = "function " +var p13728 = &p13728Var +var p13739Var = "thunk from >" +var p13739 = &p13739Var +var p13747Var = "thunk from >" +var p13747 = &p13747Var +var p13756Var = "thunk from >" +var p13756 = &p13756Var +var p13769Var = "thunk from from >>" +var p13769 = &p13769Var +var p13789Var = "thunk from from >>" +var p13789 = &p13789Var +var p13799Var = "thunk from >" +var p13799 = &p13799Var +var p13813Var = "thunk from >" +var p13813 = &p13813Var +var p13847Var = "thunk from >" +var p13847 = &p13847Var +var p13856Var = "thunk from from >>" +var p13856 = &p13856Var +var p13880Var = "thunk from from >>>" +var p13880 = &p13880Var +var p13885Var = "thunk from from >>>>" +var p13885 = &p13885Var +var p13902Var = "thunk from from >>>" +var p13902 = &p13902Var +var p13913Var = "thunk from >" +var p13913 = &p13913Var +var p13917Var = "thunk from >>" +var p13917 = &p13917Var +var p13938Var = "thunk from >>>" +var p13938 = &p13938Var +var p13944Var = "thunk from >>>>" +var p13944 = &p13944Var +var p13958Var = "thunk from >" +var p13958 = &p13958Var +var p13964Var = "object " +var p13964 = &p13964Var +var p13966Var = "object " +var p13966 = &p13966Var +var p13990Var = "object " +var p13990 = &p13990Var +var p13994Var = 
"object " +var p13994 = &p13994Var +var p13997Var = "object " +var p13997 = &p13997Var +var p14000Var = "object " +var p14000 = &p14000Var +var p14003Var = "object " +var p14003 = &p14003Var +var p14006Var = "object " +var p14006 = &p14006Var +var p14009Var = "object " +var p14009 = &p14009Var +var p14012Var = "object " +var p14012 = &p14012Var +var p14017Var = "thunk from >" +var p14017 = &p14017Var +var p14019Var = "thunk from >" +var p14019 = &p14019Var +var p14025Var = "function " +var p14025 = &p14025Var +var p14029Var = "thunk from >" +var p14029 = &p14029Var +var p14048Var = "thunk from from >>" +var p14048 = &p14048Var +var p14060Var = "thunk from from >>" +var p14060 = &p14060Var +var p14064Var = "function " +var p14064 = &p14064Var +var p14073Var = "thunk from >" +var p14073 = &p14073Var +var p14083Var = "thunk from >" +var p14083 = &p14083Var +var p14087Var = "function " +var p14087 = &p14087Var +var p14102Var = "thunk from >" +var p14102 = &p14102Var +var p14124Var = "thunk from >" +var p14124 = &p14124Var +var p14130Var = "thunk from >" +var p14130 = &p14130Var +var p14173Var = "thunk from >" +var p14173 = &p14173Var +var p14204Var = "thunk from >" +var p14204 = &p14204Var +var p14210Var = "thunk from >" +var p14210 = &p14210Var +var p14291Var = "thunk from >" +var p14291 = &p14291Var +var p14308Var = "thunk from >" +var p14308 = &p14308Var +var p14421Var = "thunk from >" +var p14421 = &p14421Var +var p14438Var = "thunk from >" +var p14438 = &p14438Var +var p14447Var = "thunk from from >>" +var p14447 = &p14447Var +var p14450Var = "function " +var p14450 = &p14450Var +var p14476Var = "thunk from >" +var p14476 = &p14476Var +var p14484Var = "object " +var p14484 = &p14484Var +var p14486Var = "object " +var p14486 = &p14486Var +var p14510Var = "object " +var p14510 = &p14510Var +var p14514Var = "object " +var p14514 = &p14514Var +var p14517Var = "object " +var p14517 = &p14517Var +var p14520Var = "object " +var p14520 = &p14520Var +var p14523Var = "object " +var p14523 = &p14523Var +var p14526Var = "object " +var p14526 = &p14526Var +var p14529Var = "object " +var p14529 = &p14529Var +var p14532Var = "object " +var p14532 = &p14532Var +var p14537Var = "thunk from >" +var p14537 = &p14537Var +var p14539Var = "thunk from >" +var p14539 = &p14539Var +var p14545Var = "function " +var p14545 = &p14545Var +var p14574Var = "thunk from >" +var p14574 = &p14574Var +var p14595Var = "thunk from >" +var p14595 = &p14595Var +var p14599Var = "function " +var p14599 = &p14599Var +var p14614Var = "thunk from >" +var p14614 = &p14614Var +var p14622Var = "thunk from >" +var p14622 = &p14622Var +var p14626Var = "thunk from from >>" +var p14626 = &p14626Var +var p14661Var = "thunk from >" +var p14661 = &p14661Var +var p14686Var = "thunk from from >>" +var p14686 = &p14686Var +var p14727Var = "thunk from >" +var p14727 = &p14727Var +var p14752Var = "thunk from from >>" +var p14752 = &p14752Var +var p14792Var = "thunk from >" +var p14792 = &p14792Var +var p14819Var = "thunk from >" +var p14819 = &p14819Var +var p14827Var = "object " +var p14827 = &p14827Var +var p14829Var = "object " +var p14829 = &p14829Var +var p14853Var = "object " +var p14853 = &p14853Var +var p14857Var = "object " +var p14857 = &p14857Var +var p14860Var = "object " +var p14860 = &p14860Var +var p14863Var = "object " +var p14863 = &p14863Var +var p14866Var = "object " +var p14866 = &p14866Var +var p14869Var = "object " +var p14869 = &p14869Var +var p14872Var = "object " +var p14872 = &p14872Var +var p14875Var = "object " +var 
p14875 = &p14875Var +var p14880Var = "thunk from >" +var p14880 = &p14880Var +var p14882Var = "thunk from >" +var p14882 = &p14882Var +var p14888Var = "function " +var p14888 = &p14888Var +var p14892Var = "thunk from >" +var p14892 = &p14892Var +var p14901Var = "thunk from from >>" +var p14901 = &p14901Var +var p14912Var = "thunk from >" +var p14912 = &p14912Var +var p14922Var = "thunk from >>" +var p14922 = &p14922Var +var p14926Var = "function " +var p14926 = &p14926Var +var p14935Var = "thunk from >" +var p14935 = &p14935Var +var p14943Var = "object " +var p14943 = &p14943Var +var p14945Var = "object " +var p14945 = &p14945Var +var p14969Var = "object " +var p14969 = &p14969Var +var p14973Var = "object " +var p14973 = &p14973Var +var p14976Var = "object " +var p14976 = &p14976Var +var p14979Var = "object " +var p14979 = &p14979Var +var p14982Var = "object " +var p14982 = &p14982Var +var p14985Var = "object " +var p14985 = &p14985Var +var p14988Var = "object " +var p14988 = &p14988Var +var p14991Var = "object " +var p14991 = &p14991Var +var p14996Var = "thunk from >" +var p14996 = &p14996Var +var p14998Var = "thunk from >" +var p14998 = &p14998Var +var p15004Var = "function " +var p15004 = &p15004Var +var p15008Var = "thunk from >" +var p15008 = &p15008Var +var p15017Var = "thunk from from >>" +var p15017 = &p15017Var +var p15038Var = "thunk from >" +var p15038 = &p15038Var +var p15046Var = "thunk from >" +var p15046 = &p15046Var +var p15055Var = "thunk from >" +var p15055 = &p15055Var +var p15064Var = "thunk from from >>" +var p15064 = &p15064Var +var p15073Var = "function " +var p15073 = &p15073Var +var p15086Var = "thunk from >" +var p15086 = &p15086Var +var p15095Var = "thunk from from >>" +var p15095 = &p15095Var +var p15099Var = "function " +var p15099 = &p15099Var +var p15111Var = "thunk from >" +var p15111 = &p15111Var +var p15120Var = "thunk from from >>" +var p15120 = &p15120Var +var p15124Var = "function " +var p15124 = &p15124Var +var p15145Var = "thunk from >" +var p15145 = &p15145Var +var p15151Var = "thunk from >" +var p15151 = &p15151Var +var p15162Var = "thunk from >" +var p15162 = &p15162Var +var p15168Var = "object " +var p15168 = &p15168Var +var p15170Var = "object " +var p15170 = &p15170Var +var p15194Var = "object " +var p15194 = &p15194Var +var p15198Var = "object " +var p15198 = &p15198Var +var p15201Var = "object " +var p15201 = &p15201Var +var p15204Var = "object " +var p15204 = &p15204Var +var p15207Var = "object " +var p15207 = &p15207Var +var p15210Var = "object " +var p15210 = &p15210Var +var p15213Var = "object " +var p15213 = &p15213Var +var p15216Var = "object " +var p15216 = &p15216Var +var p15221Var = "thunk from >" +var p15221 = &p15221Var +var p15223Var = "thunk from >" +var p15223 = &p15223Var +var p15229Var = "function " +var p15229 = &p15229Var +var p15233Var = "thunk from >" +var p15233 = &p15233Var +var p15237Var = "function " +var p15237 = &p15237Var +var p15256Var = "thunk from >" +var p15256 = &p15256Var +var p15263Var = "thunk from >" +var p15263 = &p15263Var +var p15290Var = "thunk from >" +var p15290 = &p15290Var +var p15305Var = "thunk from >" +var p15305 = &p15305Var +var p15316Var = "thunk from >" +var p15316 = &p15316Var +var p15325Var = "object " +var p15325 = &p15325Var +var p15327Var = "object " +var p15327 = &p15327Var +var p15351Var = "object " +var p15351 = &p15351Var +var p15355Var = "object " +var p15355 = &p15355Var +var p15358Var = "object " +var p15358 = &p15358Var +var p15361Var = "object " +var p15361 = &p15361Var +var 
p15364Var = "object " +var p15364 = &p15364Var +var p15367Var = "object " +var p15367 = &p15367Var +var p15370Var = "object " +var p15370 = &p15370Var +var p15373Var = "object " +var p15373 = &p15373Var +var p15378Var = "thunk from >" +var p15378 = &p15378Var +var p15380Var = "thunk from >" +var p15380 = &p15380Var +var p15386Var = "function " +var p15386 = &p15386Var +var p15395Var = "thunk from >" +var p15395 = &p15395Var +var p15404Var = "thunk from >>" +var p15404 = &p15404Var +var p15410Var = "object " +var p15410 = &p15410Var +var p15412Var = "object " +var p15412 = &p15412Var +var p15436Var = "object " +var p15436 = &p15436Var +var p15440Var = "object " +var p15440 = &p15440Var +var p15443Var = "object " +var p15443 = &p15443Var +var p15446Var = "object " +var p15446 = &p15446Var +var p15449Var = "object " +var p15449 = &p15449Var +var p15452Var = "object " +var p15452 = &p15452Var +var p15455Var = "object " +var p15455 = &p15455Var +var p15458Var = "object " +var p15458 = &p15458Var +var p15463Var = "thunk from >" +var p15463 = &p15463Var +var p15465Var = "thunk from >" +var p15465 = &p15465Var +var p15471Var = "function " +var p15471 = &p15471Var +var p15482Var = "thunk from >" +var p15482 = &p15482Var +var p15491Var = "thunk from >>" +var p15491 = &p15491Var +var p15495Var = "thunk from >>>" +var p15495 = &p15495Var +var p15504Var = "object " +var p15504 = &p15504Var +var p15506Var = "object " +var p15506 = &p15506Var +var p15530Var = "object " +var p15530 = &p15530Var +var p15534Var = "object " +var p15534 = &p15534Var +var p15537Var = "object " +var p15537 = &p15537Var +var p15540Var = "object " +var p15540 = &p15540Var +var p15543Var = "object " +var p15543 = &p15543Var +var p15546Var = "object " +var p15546 = &p15546Var +var p15549Var = "object " +var p15549 = &p15549Var +var p15552Var = "object " +var p15552 = &p15552Var +var p15557Var = "thunk from >" +var p15557 = &p15557Var +var p15559Var = "thunk from >" +var p15559 = &p15559Var +var p15565Var = "function " +var p15565 = &p15565Var +var p15574Var = "thunk from >" +var p15574 = &p15574Var +var p15584Var = "object " +var p15584 = &p15584Var +var p15586Var = "object " +var p15586 = &p15586Var +var p15610Var = "object " +var p15610 = &p15610Var +var p15614Var = "object " +var p15614 = &p15614Var +var p15617Var = "object " +var p15617 = &p15617Var +var p15620Var = "object " +var p15620 = &p15620Var +var p15623Var = "object " +var p15623 = &p15623Var +var p15626Var = "object " +var p15626 = &p15626Var +var p15629Var = "object " +var p15629 = &p15629Var +var p15632Var = "object " +var p15632 = &p15632Var +var p15637Var = "thunk from >" +var p15637 = &p15637Var +var p15639Var = "thunk from >" +var p15639 = &p15639Var +var p15645Var = "function " +var p15645 = &p15645Var +var p15649Var = "thunk from >" +var p15649 = &p15649Var +var p15653Var = "function " +var p15653 = &p15653Var +var p15670Var = "thunk from >" +var p15670 = &p15670Var +var p15685Var = "thunk from >" +var p15685 = &p15685Var +var p15717Var = "thunk from >" +var p15717 = &p15717Var +var p15739Var = "thunk from >>" +var p15739 = &p15739Var +var p15767Var = "thunk from >" +var p15767 = &p15767Var +var p15786Var = "thunk from >" +var p15786 = &p15786Var +var p15805Var = "thunk from >" +var p15805 = &p15805Var +var p15816Var = "object " +var p15816 = &p15816Var +var p15818Var = "object " +var p15818 = &p15818Var +var p15842Var = "object " +var p15842 = &p15842Var +var p15846Var = "object " +var p15846 = &p15846Var +var p15849Var = "object " +var p15849 = &p15849Var 
+var p15852Var = "object " +var p15852 = &p15852Var +var p15855Var = "object " +var p15855 = &p15855Var +var p15858Var = "object " +var p15858 = &p15858Var +var p15861Var = "object " +var p15861 = &p15861Var +var p15864Var = "object " +var p15864 = &p15864Var +var p15869Var = "thunk from >" +var p15869 = &p15869Var +var p15871Var = "thunk from >" +var p15871 = &p15871Var +var p15877Var = "function " +var p15877 = &p15877Var +var p15881Var = "thunk from >" +var p15881 = &p15881Var +var p15885Var = "function " +var p15885 = &p15885Var +var p15900Var = "thunk from >" +var p15900 = &p15900Var +var p15919Var = "thunk from >" +var p15919 = &p15919Var +var p15927Var = "thunk from >" +var p15927 = &p15927Var +var p15946Var = "thunk from >>" +var p15946 = &p15946Var +var p15980Var = "thunk from >" +var p15980 = &p15980Var +var p16018Var = "thunk from >" +var p16018 = &p16018Var +var p16037Var = "thunk from >>" +var p16037 = &p16037Var +var p16049Var = "thunk from >" +var p16049 = &p16049Var +var p16068Var = "thunk from >" +var p16068 = &p16068Var +var p16079Var = "object " +var p16079 = &p16079Var +var p16081Var = "object " +var p16081 = &p16081Var +var p16105Var = "object " +var p16105 = &p16105Var +var p16109Var = "object " +var p16109 = &p16109Var +var p16112Var = "object " +var p16112 = &p16112Var +var p16115Var = "object " +var p16115 = &p16115Var +var p16118Var = "object " +var p16118 = &p16118Var +var p16121Var = "object " +var p16121 = &p16121Var +var p16124Var = "object " +var p16124 = &p16124Var +var p16127Var = "object " +var p16127 = &p16127Var +var p16132Var = "thunk from >" +var p16132 = &p16132Var +var p16134Var = "thunk from >" +var p16134 = &p16134Var +var p16140Var = "function " +var p16140 = &p16140Var +var p16159Var = "thunk from >" +var p16159 = &p16159Var +var p16166Var = "thunk from >" +var p16166 = &p16166Var +var p16185Var = "thunk from from >>" +var p16185 = &p16185Var +var p16195Var = "thunk from >" +var p16195 = &p16195Var +var p16214Var = "thunk from from >>" +var p16214 = &p16214Var +var p16226Var = "thunk from from >>" +var p16226 = &p16226Var +var p16254Var = "thunk from >" +var p16254 = &p16254Var +var p16265Var = "thunk from from >>" +var p16265 = &p16265Var +var p16277Var = "thunk from from >>" +var p16277 = &p16277Var +var p16283Var = "thunk from >" +var p16283 = &p16283Var +var p16292Var = "thunk from from >>" +var p16292 = &p16292Var +var p16303Var = "thunk from from >>>" +var p16303 = &p16303Var +var p16333Var = "object " +var p16333 = &p16333Var +var p16346Var = "thunk from >" +var p16346 = &p16346Var +var p16369Var = "thunk from >" +var p16369 = &p16369Var +var p16382Var = "thunk from >" +var p16382 = &p16382Var +var p16398Var = "thunk from >" +var p16398 = &p16398Var +var p16419Var = "thunk from >" +var p16419 = &p16419Var +var p16429Var = "object " +var p16429 = &p16429Var +var p16431Var = "object " +var p16431 = &p16431Var +var p16455Var = "object " +var p16455 = &p16455Var +var p16459Var = "object " +var p16459 = &p16459Var +var p16462Var = "object " +var p16462 = &p16462Var +var p16465Var = "object " +var p16465 = &p16465Var +var p16468Var = "object " +var p16468 = &p16468Var +var p16471Var = "object " +var p16471 = &p16471Var +var p16474Var = "object " +var p16474 = &p16474Var +var p16477Var = "object " +var p16477 = &p16477Var +var p16482Var = "thunk from >" +var p16482 = &p16482Var +var p16484Var = "thunk from >" +var p16484 = &p16484Var +var p16490Var = "function " +var p16490 = &p16490Var +var p16499Var = "thunk from >" +var p16499 = &p16499Var 
+var p16506Var = "object " +var p16506 = &p16506Var +var p16508Var = "object " +var p16508 = &p16508Var +var p16532Var = "object " +var p16532 = &p16532Var +var p16536Var = "object " +var p16536 = &p16536Var +var p16539Var = "object " +var p16539 = &p16539Var +var p16542Var = "object " +var p16542 = &p16542Var +var p16545Var = "object " +var p16545 = &p16545Var +var p16548Var = "object " +var p16548 = &p16548Var +var p16551Var = "object " +var p16551 = &p16551Var +var p16554Var = "object " +var p16554 = &p16554Var +var p16559Var = "thunk from >" +var p16559 = &p16559Var +var p16561Var = "thunk from >" +var p16561 = &p16561Var +var p16567Var = "function " +var p16567 = &p16567Var +var p16576Var = "thunk from >" +var p16576 = &p16576Var +var p16583Var = "object " +var p16583 = &p16583Var +var p16585Var = "object " +var p16585 = &p16585Var +var p16609Var = "object " +var p16609 = &p16609Var +var p16613Var = "object " +var p16613 = &p16613Var +var p16616Var = "object " +var p16616 = &p16616Var +var p16619Var = "object " +var p16619 = &p16619Var +var p16622Var = "object " +var p16622 = &p16622Var +var p16625Var = "object " +var p16625 = &p16625Var +var p16628Var = "object " +var p16628 = &p16628Var +var p16631Var = "object " +var p16631 = &p16631Var +var p16636Var = "thunk from >" +var p16636 = &p16636Var +var p16638Var = "thunk from >" +var p16638 = &p16638Var +var p16644Var = "function " +var p16644 = &p16644Var +var p16653Var = "thunk from >" +var p16653 = &p16653Var +var p16662Var = "object " +var p16662 = &p16662Var +var p16664Var = "object " +var p16664 = &p16664Var +var p16688Var = "object " +var p16688 = &p16688Var +var p16692Var = "object " +var p16692 = &p16692Var +var p16695Var = "object " +var p16695 = &p16695Var +var p16698Var = "object " +var p16698 = &p16698Var +var p16701Var = "object " +var p16701 = &p16701Var +var p16704Var = "object " +var p16704 = &p16704Var +var p16707Var = "object " +var p16707 = &p16707Var +var p16710Var = "object " +var p16710 = &p16710Var +var p16715Var = "thunk from >" +var p16715 = &p16715Var +var p16717Var = "thunk from >" +var p16717 = &p16717Var +var p16723Var = "function " +var p16723 = &p16723Var +var p16732Var = "thunk from >" +var p16732 = &p16732Var +var p16741Var = "object " +var p16741 = &p16741Var +var p16743Var = "object " +var p16743 = &p16743Var +var p16767Var = "object " +var p16767 = &p16767Var +var p16771Var = "object " +var p16771 = &p16771Var +var p16774Var = "object " +var p16774 = &p16774Var +var p16777Var = "object " +var p16777 = &p16777Var +var p16780Var = "object " +var p16780 = &p16780Var +var p16783Var = "object " +var p16783 = &p16783Var +var p16786Var = "object " +var p16786 = &p16786Var +var p16789Var = "object " +var p16789 = &p16789Var +var p16794Var = "thunk from >" +var p16794 = &p16794Var +var p16796Var = "thunk from >" +var p16796 = &p16796Var +var p16802Var = "function " +var p16802 = &p16802Var +var p16806Var = "thunk from >" +var p16806 = &p16806Var +var p16815Var = "thunk from from >>" +var p16815 = &p16815Var +var p16821Var = "thunk from >" +var p16821 = &p16821Var +var p16830Var = "thunk from from >>" +var p16830 = &p16830Var +var p16845Var = "thunk from >" +var p16845 = &p16845Var +var p16861Var = "thunk from >" +var p16861 = &p16861Var +var p16868Var = "thunk from >" +var p16868 = &p16868Var +var p16877Var = "thunk from from >>" +var p16877 = &p16877Var +var p16892Var = "thunk from >" +var p16892 = &p16892Var +var p16903Var = "thunk from >>" +var p16903 = &p16903Var +var p16910Var = "thunk from >" +var 
p16910 = &p16910Var +var p16914Var = "function " +var p16914 = &p16914Var +var p16954Var = "thunk from >" +var p16954 = &p16954Var +var p16969Var = "thunk from >" +var p16969 = &p16969Var +var p16985Var = "thunk from >" +var p16985 = &p16985Var +var p16992Var = "thunk from >" +var p16992 = &p16992Var +var p17001Var = "thunk from from >>" +var p17001 = &p17001Var +var p17007Var = "thunk from >" +var p17007 = &p17007Var +var p17016Var = "thunk from from >>" +var p17016 = &p17016Var +var p17041Var = "thunk from >" +var p17041 = &p17041Var +var p17048Var = "thunk from >" +var p17048 = &p17048Var +var p17052Var = "function " +var p17052 = &p17052Var +var p17067Var = "thunk from >" +var p17067 = &p17067Var +var p17102Var = "thunk from >" +var p17102 = &p17102Var +var p17117Var = "thunk from >" +var p17117 = &p17117Var +var p17131Var = "thunk from >" +var p17131 = &p17131Var +var p17139Var = "object " +var p17139 = &p17139Var +var p17141Var = "object " +var p17141 = &p17141Var +var p17165Var = "object " +var p17165 = &p17165Var +var p17169Var = "object " +var p17169 = &p17169Var +var p17172Var = "object " +var p17172 = &p17172Var +var p17175Var = "object " +var p17175 = &p17175Var +var p17178Var = "object " +var p17178 = &p17178Var +var p17181Var = "object " +var p17181 = &p17181Var +var p17184Var = "object " +var p17184 = &p17184Var +var p17187Var = "object " +var p17187 = &p17187Var +var p17192Var = "thunk from >" +var p17192 = &p17192Var +var p17194Var = "thunk from >" +var p17194 = &p17194Var +var p17200Var = "function " +var p17200 = &p17200Var +var p17204Var = "thunk from >" +var p17204 = &p17204Var +var p17213Var = "thunk from from >>" +var p17213 = &p17213Var +var p17225Var = "thunk from >" +var p17225 = &p17225Var +var p17237Var = "thunk from >>" +var p17237 = &p17237Var +var p17248Var = "thunk from >>>" +var p17248 = &p17248Var +var p17255Var = "function " +var p17255 = &p17255Var +var p17265Var = "thunk from >>" +var p17265 = &p17265Var +var p17271Var = "object " +var p17271 = &p17271Var +var p17273Var = "object " +var p17273 = &p17273Var +var p17297Var = "object " +var p17297 = &p17297Var +var p17301Var = "object " +var p17301 = &p17301Var +var p17304Var = "object " +var p17304 = &p17304Var +var p17307Var = "object " +var p17307 = &p17307Var +var p17310Var = "object " +var p17310 = &p17310Var +var p17313Var = "object " +var p17313 = &p17313Var +var p17316Var = "object " +var p17316 = &p17316Var +var p17319Var = "object " +var p17319 = &p17319Var +var p17324Var = "thunk from >" +var p17324 = &p17324Var +var p17326Var = "thunk from >" +var p17326 = &p17326Var +var p17332Var = "function " +var p17332 = &p17332Var +var p17336Var = "thunk from >" +var p17336 = &p17336Var +var p17340Var = "function " +var p17340 = &p17340Var +var p17344Var = "thunk from >" +var p17344 = &p17344Var +var p17353Var = "thunk from from >>" +var p17353 = &p17353Var +var p17393Var = "thunk from >" +var p17393 = &p17393Var +var p17420Var = "thunk from >" +var p17420 = &p17420Var +var p17428Var = "thunk from >" +var p17428 = &p17428Var +var p17437Var = "thunk from from >>" +var p17437 = &p17437Var +var p17471Var = "thunk from >" +var p17471 = &p17471Var +var p17480Var = "thunk from >>" +var p17480 = &p17480Var +var p17486Var = "thunk from >" +var p17486 = &p17486Var +var p17495Var = "thunk from >>" +var p17495 = &p17495Var +var p17540Var = "thunk from >" +var p17540 = &p17540Var +var p17549Var = "thunk from >>" +var p17549 = &p17549Var +var p17564Var = "object " +var p17564 = &p17564Var +var p17573Var = "thunk from 
>" +var p17573 = &p17573Var +var p17589Var = "thunk from >" +var p17589 = &p17589Var var p1 = &Source{ lines: []string{ "/*\n", @@ -2743,7 +4347,7 @@ var p1 = &Source{ " aux(value, [], ''),\n", "\n", " manifestYamlDoc(value)::\n", - " local aux(v, in_array, in_object, path, cindent) =\n", + " local aux(v, in_object, path, cindent) =\n", " if v == true then\n", " 'true'\n", " else if v == false then\n", @@ -2768,21 +4372,21 @@ var p1 = &Source{ " '[]'\n", " else\n", " local range = std.range(0, std.length(v) - 1);\n", - " local new_indent = cindent + ' ';\n", - " local parts = [aux(v[i], true, false, path + [i], new_indent) for i in range];\n", - " (if in_object then '\\n' + cindent else '') + '- ' + std.join('\\n' + cindent + '- ', parts)\n", + " local actual_indent = if in_object then cindent[2:] else cindent;\n", + " local parts = [aux(v[i], false, path + [i], cindent) for i in range];\n", + " (if in_object then '\\n' + actual_indent else '')\n", + " + '- ' + std.join('\\n' + actual_indent + '- ', parts)\n", " else if std.type(v) == 'object' then\n", " if std.length(v) == 0 then\n", " '{}'\n", " else\n", " local new_indent = cindent + ' ';\n", " local lines = [\n", - " cindent + std.escapeStringJson(k) + ': ' + aux(v[k], false, true, path + [k], new_indent)\n", + " std.escapeStringJson(k) + ': ' + aux(v[k], true, path + [k], new_indent)\n", " for k in std.objectFields(v)\n", " ];\n", - " (if in_array || in_object then '\\n' else '')\n", - " + std.join('\\n', lines);\n", - " aux(value, false, false, [], ''),\n", + " (if in_object then '\\n' + cindent else '') + std.join('\\n' + cindent, lines);\n", + " aux(value, false, [], ''),\n", "\n", " manifestYamlStream(value)::\n", " if std.type(value) != 'array' then\n", @@ -3171,7 +4775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -3413,7 +5017,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p35, freeVariables: Identifiers{ "base64_table", "i", @@ -3437,7 +5041,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p39, freeVariables: Identifiers{ "base64_table", "i", @@ -3457,7 +5061,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p42, freeVariables: Identifiers{ "base64_table", }, @@ -3478,7 +5082,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p45, freeVariables: Identifiers{ "i", }, @@ -3501,7 +5105,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p48, freeVariables: Identifiers{ "i", }, @@ -3530,7 +5134,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p51, freeVariables: Identifiers{ "std", }, @@ -3549,7 +5153,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p54, freeVariables: Identifiers{ "std", }, @@ -3568,7 +5172,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p57, freeVariables: Identifiers{ "std", }, @@ -3614,7 +5218,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p62, freeVariables: nil, }, Value: float64(0), @@ -3634,14 +5238,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p64, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -3697,7 +5301,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - 
context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -3808,7 +5412,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p69, + context: p80, freeVariables: Identifiers{ "std", "v", @@ -3828,7 +5432,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p69, + context: p80, freeVariables: Identifiers{ "std", }, @@ -3847,7 +5451,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p69, + context: p80, freeVariables: Identifiers{ "std", }, @@ -3893,7 +5497,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p78, + context: p89, freeVariables: Identifiers{ "v", }, @@ -3920,7 +5524,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p69, + context: p80, freeVariables: nil, }, Value: "string", @@ -3994,7 +5598,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p96, freeVariables: nil, }, }, @@ -4016,7 +5620,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p98, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -4258,7 +5862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p122, freeVariables: Identifiers{ "base64_table", "i", @@ -4282,7 +5886,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p126, freeVariables: Identifiers{ "base64_table", "i", @@ -4302,7 +5906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p129, freeVariables: Identifiers{ "base64_table", }, @@ -4323,7 +5927,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p132, freeVariables: Identifiers{ "i", }, @@ -4346,7 +5950,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p135, freeVariables: Identifiers{ "i", }, @@ -4375,7 +5979,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p138, freeVariables: Identifiers{ "std", }, @@ -4394,7 +5998,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p141, freeVariables: Identifiers{ "std", }, @@ -4413,7 +6017,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p144, freeVariables: Identifiers{ "std", }, @@ -4459,7 +6063,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p149, freeVariables: nil, }, Value: float64(0), @@ -4479,14 +6083,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p151, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -4542,7 +6146,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -4653,7 +6257,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p95, + context: p165, freeVariables: Identifiers{ "std", "v", @@ -4673,7 +6277,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p95, + context: p165, freeVariables: Identifiers{ "std", }, @@ -4692,7 +6296,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p95, + context: p165, freeVariables: Identifiers{ "std", }, @@ -4738,7 +6342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p104, + context: p174, freeVariables: Identifiers{ "v", }, @@ -4765,7 +6369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p95, + context: p165, freeVariables: nil, }, Value: "number", @@ -4839,7 +6443,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: 
[... the remaining hunks of this regenerated go-jsonnet StdAst source are not reproduced here; they repeat the same two mechanical changes throughout the file: context pointers are renumbered (for example p9 -> p68/p96/p122, p43 -> p135, p54 -> p149, p69 -> p80, p95 -> p165, p121 -> p250, and so on up through p1041 -> p1819), and every "Named: nil" argument list becomes "Named: []NamedArgument{}" ...]
context: p923, + context: p1701, freeVariables: Identifiers{ "std", }, @@ -21852,7 +23456,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "std", }, @@ -21898,7 +23502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1064, + context: p1842, freeVariables: Identifiers{ "maxsplits", }, @@ -21925,7 +23529,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: nil, }, Value: "number", @@ -21953,7 +23557,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "maxsplits", "std", @@ -21973,7 +23577,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "maxsplits", "std", @@ -21993,7 +23597,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: nil, }, Value: "std.splitLimit third parameter should be a number, got ", @@ -22015,7 +23619,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "maxsplits", "std", @@ -22035,7 +23639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "std", }, @@ -22054,7 +23658,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "std", }, @@ -22100,7 +23704,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1081, + context: p1859, freeVariables: Identifiers{ "maxsplits", }, @@ -22129,7 +23733,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "c", "maxsplits", @@ -22154,7 +23758,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1087, + context: p1865, freeVariables: Identifiers{ "aux", "maxsplits", @@ -22186,7 +23790,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "aux", @@ -22215,7 +23819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1095, + context: p1873, freeVariables: Identifiers{ "i", "str", @@ -22235,7 +23839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1095, + context: p1873, freeVariables: Identifiers{ "str", }, @@ -22256,7 +23860,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1095, + context: p1873, freeVariables: Identifiers{ "i", }, @@ -22282,7 +23886,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "aux", @@ -22312,7 +23916,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1105, + context: p1883, freeVariables: Identifiers{ "i", }, @@ -22331,7 +23935,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1105, + context: p1883, freeVariables: Identifiers{ "i", }, @@ -22353,7 +23957,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1105, + context: p1883, freeVariables: nil, }, Value: float64(1), @@ -22377,7 +23981,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "aux", @@ -22405,7 +24009,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "i", "std", @@ -22426,7 +24030,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "i", }, @@ -22448,7 +24052,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "std", "str", @@ -22468,7 +24072,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "std", }, @@ -22487,7 +24091,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "std", }, @@ -22533,7 +24137,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1125, + context: p1903, freeVariables: Identifiers{ "str", }, @@ -22561,7 +24165,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "v", @@ -22581,7 +24185,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", }, @@ -22603,7 +24207,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "v", }, @@ -22623,7 +24227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1135, + context: p1913, freeVariables: Identifiers{ "v", }, @@ -22648,7 +24252,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "aux", @@ -22675,7 +24279,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "c", @@ -22784,7 +24388,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "c", }, @@ -22805,7 +24409,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "delim", }, @@ -22833,7 +24437,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "maxsplits", @@ -22939,7 +24543,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "maxsplits", }, @@ -22960,7 +24564,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: nil, }, Op: UnaryOp(3), @@ -22978,7 +24582,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: nil, }, Value: float64(1), @@ -23006,7 +24610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "maxsplits", @@ -23027,7 +24631,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "std", @@ -23047,7 +24651,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "std", }, @@ -23066,7 +24670,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "std", }, @@ -23112,7 +24716,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1178, + context: p1956, freeVariables: Identifiers{ "arr", }, @@ -23140,7 +24744,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "maxsplits", }, @@ -23164,7 +24768,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "aux", @@ -23188,7 +24792,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "aux", }, @@ -23211,7 +24815,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p1188, + context: p1966, freeVariables: Identifiers{ "str", }, @@ -23232,7 +24836,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1188, + context: p1966, freeVariables: Identifiers{ "delim", }, @@ -23253,7 +24857,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1188, + context: p1966, freeVariables: Identifiers{ "i2", }, @@ -23274,7 +24878,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1188, + context: p1966, freeVariables: Identifiers{ "arr", "v", @@ -23294,7 +24898,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1188, + context: p1966, freeVariables: Identifiers{ "arr", }, @@ -23316,7 +24920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1188, + context: p1966, freeVariables: Identifiers{ "v", }, @@ -23336,7 +24940,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1202, + context: p1980, freeVariables: Identifiers{ "v", }, @@ -23361,7 +24965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1188, + context: p1966, freeVariables: nil, }, Value: "", @@ -23388,7 +24992,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "arr", "aux", @@ -23413,7 +25017,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1091, + context: p1869, freeVariables: Identifiers{ "aux", }, @@ -23436,7 +25040,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1211, + context: p1989, freeVariables: Identifiers{ "str", }, @@ -23457,7 +25061,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1211, + context: p1989, freeVariables: Identifiers{ "delim", }, @@ -23478,7 +25082,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1211, + context: p1989, freeVariables: Identifiers{ "i2", }, @@ -23499,7 +25103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1211, + context: p1989, freeVariables: Identifiers{ "arr", }, @@ -23520,7 +25124,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1211, + context: p1989, freeVariables: Identifiers{ "c", "v", @@ -23540,7 +25144,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1211, + context: p1989, freeVariables: Identifiers{ "v", }, @@ -23562,7 +25166,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1211, + context: p1989, freeVariables: Identifiers{ "c", }, @@ -23598,7 +25202,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "aux", "c", @@ -23619,7 +25223,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p923, + context: p1701, freeVariables: Identifiers{ "aux", }, @@ -23642,7 +25246,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1231, + context: p2009, freeVariables: Identifiers{ "str", }, @@ -23663,7 +25267,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1231, + context: p2009, freeVariables: Identifiers{ "c", }, @@ -23684,7 +25288,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1231, + context: p2009, freeVariables: nil, }, Value: float64(0), @@ -23704,7 +25308,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1231, + context: p2009, freeVariables: nil, }, Elements: nil, @@ -23724,7 +25328,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1231, + context: p2009, freeVariables: nil, }, Value: "", @@ -23803,7 +25407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2020, 
freeVariables: nil, }, }, @@ -23825,7 +25429,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2022, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -24067,7 +25671,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2046, freeVariables: Identifiers{ "base64_table", "i", @@ -24091,7 +25695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2050, freeVariables: Identifiers{ "base64_table", "i", @@ -24111,7 +25715,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2053, freeVariables: Identifiers{ "base64_table", }, @@ -24132,7 +25736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2056, freeVariables: Identifiers{ "i", }, @@ -24155,7 +25759,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p2059, freeVariables: Identifiers{ "i", }, @@ -24184,7 +25788,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2062, freeVariables: Identifiers{ "std", }, @@ -24203,7 +25807,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2065, freeVariables: Identifiers{ "std", }, @@ -24222,7 +25826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2068, freeVariables: Identifiers{ "std", }, @@ -24268,7 +25872,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2073, freeVariables: nil, }, Value: float64(0), @@ -24288,14 +25892,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2075, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -24351,7 +25955,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -24486,7 +26090,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", "str", @@ -24506,7 +26110,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", }, @@ -24525,7 +26129,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", }, @@ -24571,7 +26175,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1263, + context: p2100, freeVariables: Identifiers{ "str", }, @@ -24598,7 +26202,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: nil, }, Value: "string", @@ -24732,7 +26336,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "from", "std", @@ -24752,7 +26356,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", }, @@ -24771,7 +26375,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", }, @@ -24817,7 +26421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1285, + context: p2122, freeVariables: Identifiers{ "from", }, @@ -24844,7 +26448,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: nil, }, Value: "string", @@ -24978,7 +26582,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", 
"to", @@ -24998,7 +26602,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", }, @@ -25017,7 +26621,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", }, @@ -25063,7 +26667,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1307, + context: p2144, freeVariables: Identifiers{ "to", }, @@ -25090,7 +26694,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: nil, }, Value: "string", @@ -25245,7 +26849,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "from", }, @@ -25266,7 +26870,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: nil, }, Value: "", @@ -25294,7 +26898,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "from", "std", @@ -25319,7 +26923,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1329, + context: p2166, freeVariables: Identifiers{ "std", "str", @@ -25339,7 +26943,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1329, + context: p2166, freeVariables: Identifiers{ "std", }, @@ -25358,7 +26962,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1329, + context: p2166, freeVariables: Identifiers{ "std", }, @@ -25404,7 +27008,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1338, + context: p2175, freeVariables: Identifiers{ "str", }, @@ -25434,7 +27038,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "from", "std", @@ -25460,7 +27064,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1344, + context: p2181, freeVariables: Identifiers{ "from", "std", @@ -25480,7 +27084,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1344, + context: p2181, freeVariables: Identifiers{ "std", }, @@ -25499,7 +27103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1344, + context: p2181, freeVariables: Identifiers{ "std", }, @@ -25545,7 +27149,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1353, + context: p2190, freeVariables: Identifiers{ "from", }, @@ -25575,7 +27179,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "from", "from_len", @@ -25602,7 +27206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1359, + context: p2196, freeVariables: Identifiers{ "from", "from_len", @@ -25806,7 +27410,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1379, + context: p2216, freeVariables: Identifiers{ "str", }, @@ -25827,7 +27431,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1379, + context: p2216, freeVariables: Identifiers{ "i", }, @@ -25848,7 +27452,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1379, + context: p2216, freeVariables: Identifiers{ "from_len", "i", @@ -25868,7 +27472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1379, + context: p2216, freeVariables: Identifiers{ "i", }, @@ -25890,7 +27494,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1379, + context: p2216, freeVariables: Identifiers{ "from_len", }, @@ -25936,7 +27540,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1379, + context: p2216, freeVariables: Identifiers{ "from", }, @@ -25967,7 +27571,7 
@@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "found_at", "from", @@ -25995,7 +27599,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1396, + context: p2233, freeVariables: Identifiers{ "found_at", "from", @@ -26029,7 +27633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "acc", "curr_index", @@ -26057,7 +27661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "curr_index", "str_len", @@ -26077,7 +27681,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "curr_index", }, @@ -26099,7 +27703,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "str_len", }, @@ -26121,7 +27725,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "acc", "curr_index", @@ -26144,7 +27748,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "acc", }, @@ -26253,7 +27857,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "str", }, @@ -26274,7 +27878,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "start_index", }, @@ -26295,7 +27899,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "curr_index", }, @@ -26341,7 +27945,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "acc", "curr_index", @@ -26368,7 +27972,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "curr_index", "found_at", @@ -26388,7 +27992,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "found_at", }, @@ -26411,7 +28015,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1435, + context: p2272, freeVariables: Identifiers{ "curr_index", }, @@ -26438,7 +28042,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "acc", "curr_index", @@ -26467,7 +28071,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1441, + context: p2278, freeVariables: Identifiers{ "curr_index", "from", @@ -26488,7 +28092,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1441, + context: p2278, freeVariables: Identifiers{ "curr_index", }, @@ -26510,7 +28114,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1441, + context: p2278, freeVariables: Identifiers{ "from", "std", @@ -26530,7 +28134,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1441, + context: p2278, freeVariables: Identifiers{ "std", }, @@ -26549,7 +28153,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1441, + context: p2278, freeVariables: Identifiers{ "std", }, @@ -26595,7 +28199,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1454, + context: p2291, freeVariables: Identifiers{ "from", }, @@ -26626,7 +28230,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "acc", "curr_index", @@ -26652,7 +28256,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, 
- context: p1400, + context: p2237, freeVariables: Identifiers{ "replace_after", }, @@ -26675,7 +28279,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "new_index", }, @@ -26696,7 +28300,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "new_index", }, @@ -26717,7 +28321,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "acc", "curr_index", @@ -26741,7 +28345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "acc", "curr_index", @@ -26764,7 +28368,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "acc", }, @@ -26873,7 +28477,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "str", }, @@ -26894,7 +28498,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "start_index", }, @@ -26915,7 +28519,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "curr_index", }, @@ -26962,7 +28566,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1462, + context: p2299, freeVariables: Identifiers{ "to", }, @@ -26991,7 +28595,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "acc", "curr_index", @@ -27013,7 +28617,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1400, + context: p2237, freeVariables: Identifiers{ "replace_after", }, @@ -27036,7 +28640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1495, + context: p2332, freeVariables: Identifiers{ "start_index", }, @@ -27057,7 +28661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1495, + context: p2332, freeVariables: Identifiers{ "curr_index", }, @@ -27076,7 +28680,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1495, + context: p2332, freeVariables: Identifiers{ "curr_index", }, @@ -27098,7 +28702,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1495, + context: p2332, freeVariables: nil, }, Value: float64(1), @@ -27119,7 +28723,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1495, + context: p2332, freeVariables: Identifiers{ "acc", }, @@ -27152,7 +28756,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "from", "from_len", @@ -27261,7 +28865,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "from_len", }, @@ -27282,7 +28886,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: nil, }, Value: float64(1), @@ -27308,7 +28912,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "from", "std", @@ -27330,7 +28934,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", }, @@ -27349,7 +28953,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "std", }, @@ -27395,7 +28999,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1526, + context: p2363, freeVariables: Identifiers{ "to", }, @@ -27416,7 
+29020,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1526, + context: p2363, freeVariables: Identifiers{ "from", "std", @@ -27437,7 +29041,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1526, + context: p2363, freeVariables: Identifiers{ "std", }, @@ -27456,7 +29060,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1526, + context: p2363, freeVariables: Identifiers{ "std", }, @@ -27502,7 +29106,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1537, + context: p2374, freeVariables: Identifiers{ "str", }, @@ -27523,7 +29127,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1537, + context: p2374, freeVariables: Identifiers{ "from", }, @@ -27556,7 +29160,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "replace_after", }, @@ -27575,7 +29179,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: Identifiers{ "replace_after", }, @@ -27598,7 +29202,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1547, + context: p2384, freeVariables: nil, }, Value: float64(0), @@ -27618,7 +29222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1547, + context: p2384, freeVariables: nil, }, Value: float64(0), @@ -27638,7 +29242,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1547, + context: p2384, freeVariables: nil, }, Value: "", @@ -27687,7 +29291,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1254, + context: p2091, freeVariables: nil, }, Value: "'from' string must not be zero length.", @@ -27877,7 +29481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2399, freeVariables: nil, }, }, @@ -27899,7 +29503,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2401, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -28141,7 +29745,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2425, freeVariables: Identifiers{ "base64_table", "i", @@ -28165,7 +29769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2429, freeVariables: Identifiers{ "base64_table", "i", @@ -28185,7 +29789,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2432, freeVariables: Identifiers{ "base64_table", }, @@ -28206,7 +29810,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2435, freeVariables: Identifiers{ "i", }, @@ -28229,7 +29833,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p2438, freeVariables: Identifiers{ "i", }, @@ -28258,7 +29862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2441, freeVariables: Identifiers{ "std", }, @@ -28277,7 +29881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2444, freeVariables: Identifiers{ "std", }, @@ -28296,7 +29900,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2447, freeVariables: Identifiers{ "std", }, @@ -28342,7 +29946,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2452, freeVariables: nil, }, Value: float64(0), @@ -28362,14 +29966,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2454, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, 
@@ -28425,7 +30029,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -28451,7 +30055,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1564, + context: p2460, freeVariables: Identifiers{ "std", "x", @@ -28474,7 +30078,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1568, + context: p2464, freeVariables: Identifiers{ "std", }, @@ -28493,7 +30097,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1568, + context: p2464, freeVariables: Identifiers{ "std", }, @@ -28540,7 +30144,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1564, + context: p2460, freeVariables: Identifiers{ "cp", "std", @@ -28564,7 +30168,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1577, + context: p2473, freeVariables: Identifiers{ "cp", "std", @@ -28591,7 +30195,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "c", "cp", @@ -28612,7 +30216,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "c", "cp", @@ -28632,7 +30236,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "c", "cp", @@ -28652,7 +30256,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "c", "cp", @@ -28672,7 +30276,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "cp", }, @@ -28695,7 +30299,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1593, + context: p2489, freeVariables: Identifiers{ "c", }, @@ -28723,7 +30327,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: nil, }, Value: float64(97), @@ -28745,7 +30349,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "c", "cp", @@ -28765,7 +30369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "c", "cp", @@ -28785,7 +30389,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "cp", }, @@ -28808,7 +30412,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1604, + context: p2500, freeVariables: Identifiers{ "c", }, @@ -28836,7 +30440,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: nil, }, Value: float64(123), @@ -28858,7 +30462,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "c", "cp", @@ -28879,7 +30483,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "std", }, @@ -28898,7 +30502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "std", }, @@ -28944,7 +30548,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1616, + context: p2512, freeVariables: Identifiers{ "c", "cp", @@ -28964,7 +30568,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1616, + context: p2512, freeVariables: Identifiers{ "c", "cp", @@ -28984,7 +30588,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1616, + context: p2512, freeVariables: Identifiers{ "cp", }, @@ -29007,7 +30611,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p1624, + context: p2520, freeVariables: Identifiers{ "c", }, @@ -29035,7 +30639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1616, + context: p2512, freeVariables: nil, }, Value: float64(32), @@ -29062,7 +30666,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1581, + context: p2477, freeVariables: Identifiers{ "c", }, @@ -29088,7 +30692,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1564, + context: p2460, freeVariables: Identifiers{ "std", "up_letter", @@ -29109,7 +30713,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1564, + context: p2460, freeVariables: Identifiers{ "std", }, @@ -29128,7 +30732,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1564, + context: p2460, freeVariables: Identifiers{ "std", }, @@ -29174,7 +30778,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1638, + context: p2534, freeVariables: nil, }, Value: "", @@ -29195,7 +30799,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1638, + context: p2534, freeVariables: Identifiers{ "std", "up_letter", @@ -29216,7 +30820,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1638, + context: p2534, freeVariables: Identifiers{ "std", }, @@ -29235,7 +30839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1638, + context: p2534, freeVariables: Identifiers{ "std", }, @@ -29281,7 +30885,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1648, + context: p2544, freeVariables: Identifiers{ "up_letter", }, @@ -29302,7 +30906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1648, + context: p2544, freeVariables: Identifiers{ "std", "x", @@ -29322,7 +30926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1648, + context: p2544, freeVariables: Identifiers{ "std", }, @@ -29341,7 +30945,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1648, + context: p2544, freeVariables: Identifiers{ "std", }, @@ -29387,7 +30991,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1659, + context: p2555, freeVariables: Identifiers{ "x", }, @@ -29475,7 +31079,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2561, freeVariables: nil, }, }, @@ -29497,7 +31101,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2563, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -29739,7 +31343,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2587, freeVariables: Identifiers{ "base64_table", "i", @@ -29763,7 +31367,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2591, freeVariables: Identifiers{ "base64_table", "i", @@ -29783,7 +31387,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2594, freeVariables: Identifiers{ "base64_table", }, @@ -29804,7 +31408,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2597, freeVariables: Identifiers{ "i", }, @@ -29827,7 +31431,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p2600, freeVariables: Identifiers{ "i", }, @@ -29856,7 +31460,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2603, freeVariables: Identifiers{ "std", }, @@ -29875,7 +31479,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2606, freeVariables: Identifiers{ "std", }, @@ -29894,7 +31498,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p9, + context: p2609, freeVariables: Identifiers{ "std", }, @@ -29940,7 +31544,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2614, freeVariables: nil, }, Value: float64(0), @@ -29960,14 +31564,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2616, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -30023,7 +31627,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -30049,7 +31653,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1667, + context: p2622, freeVariables: Identifiers{ "std", "x", @@ -30072,7 +31676,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1671, + context: p2626, freeVariables: Identifiers{ "std", }, @@ -30091,7 +31695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1671, + context: p2626, freeVariables: Identifiers{ "std", }, @@ -30138,7 +31742,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1667, + context: p2622, freeVariables: Identifiers{ "cp", "std", @@ -30162,7 +31766,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1680, + context: p2635, freeVariables: Identifiers{ "cp", "std", @@ -30189,7 +31793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "c", "cp", @@ -30210,7 +31814,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "c", "cp", @@ -30230,7 +31834,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "c", "cp", @@ -30250,7 +31854,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "c", "cp", @@ -30270,7 +31874,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "cp", }, @@ -30293,7 +31897,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1696, + context: p2651, freeVariables: Identifiers{ "c", }, @@ -30321,7 +31925,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: nil, }, Value: float64(65), @@ -30343,7 +31947,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "c", "cp", @@ -30363,7 +31967,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "c", "cp", @@ -30383,7 +31987,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "cp", }, @@ -30406,7 +32010,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1707, + context: p2662, freeVariables: Identifiers{ "c", }, @@ -30434,7 +32038,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: nil, }, Value: float64(91), @@ -30456,7 +32060,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "c", "cp", @@ -30477,7 +32081,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "std", }, @@ -30496,7 +32100,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "std", }, 
@@ -30542,7 +32146,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1719, + context: p2674, freeVariables: Identifiers{ "c", "cp", @@ -30562,7 +32166,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1719, + context: p2674, freeVariables: Identifiers{ "c", "cp", @@ -30582,7 +32186,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1719, + context: p2674, freeVariables: Identifiers{ "cp", }, @@ -30605,7 +32209,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1727, + context: p2682, freeVariables: Identifiers{ "c", }, @@ -30633,7 +32237,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1719, + context: p2674, freeVariables: nil, }, Value: float64(32), @@ -30660,7 +32264,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1684, + context: p2639, freeVariables: Identifiers{ "c", }, @@ -30686,7 +32290,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1667, + context: p2622, freeVariables: Identifiers{ "down_letter", "std", @@ -30707,7 +32311,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1667, + context: p2622, freeVariables: Identifiers{ "std", }, @@ -30726,7 +32330,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1667, + context: p2622, freeVariables: Identifiers{ "std", }, @@ -30772,7 +32376,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1741, + context: p2696, freeVariables: nil, }, Value: "", @@ -30793,7 +32397,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1741, + context: p2696, freeVariables: Identifiers{ "down_letter", "std", @@ -30814,7 +32418,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1741, + context: p2696, freeVariables: Identifiers{ "std", }, @@ -30833,7 +32437,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1741, + context: p2696, freeVariables: Identifiers{ "std", }, @@ -30879,7 +32483,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1751, + context: p2706, freeVariables: Identifiers{ "down_letter", }, @@ -30900,7 +32504,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1751, + context: p2706, freeVariables: Identifiers{ "std", "x", @@ -30920,7 +32524,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1751, + context: p2706, freeVariables: Identifiers{ "std", }, @@ -30939,7 +32543,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1751, + context: p2706, freeVariables: Identifiers{ "std", }, @@ -30985,7 +32589,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1762, + context: p2717, freeVariables: Identifiers{ "x", }, @@ -31073,7 +32677,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2723, freeVariables: nil, }, }, @@ -31095,7 +32699,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2725, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -31337,7 +32941,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2749, freeVariables: Identifiers{ "base64_table", "i", @@ -31361,7 +32965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2753, freeVariables: Identifiers{ "base64_table", "i", @@ -31381,7 +32985,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2756, freeVariables: Identifiers{ "base64_table", }, @@ -31402,7 +33006,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2759, freeVariables: 
Identifiers{ "i", }, @@ -31425,7 +33029,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p2762, freeVariables: Identifiers{ "i", }, @@ -31454,7 +33058,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2765, freeVariables: Identifiers{ "std", }, @@ -31473,7 +33077,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2768, freeVariables: Identifiers{ "std", }, @@ -31492,7 +33096,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2771, freeVariables: Identifiers{ "std", }, @@ -31538,7 +33142,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2776, freeVariables: nil, }, Value: float64(0), @@ -31558,14 +33162,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2778, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -31621,7 +33225,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -31648,7 +33252,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1770, + context: p2784, freeVariables: Identifiers{ "from", "std", @@ -31669,7 +33273,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1770, + context: p2784, freeVariables: Identifiers{ "std", }, @@ -31688,7 +33292,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1770, + context: p2784, freeVariables: Identifiers{ "std", }, @@ -31734,7 +33338,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1779, + context: p2793, freeVariables: Identifiers{ "from", "to", @@ -31754,7 +33358,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1779, + context: p2793, freeVariables: Identifiers{ "from", "to", @@ -31774,7 +33378,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1779, + context: p2793, freeVariables: Identifiers{ "to", }, @@ -31796,7 +33400,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1779, + context: p2793, freeVariables: Identifiers{ "from", }, @@ -31819,7 +33423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1779, + context: p2793, freeVariables: nil, }, Value: float64(1), @@ -31840,7 +33444,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1779, + context: p2793, freeVariables: Identifiers{ "from", }, @@ -31866,7 +33470,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1792, + context: p2806, freeVariables: Identifiers{ "from", "i", @@ -31886,7 +33490,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1792, + context: p2806, freeVariables: Identifiers{ "i", }, @@ -31908,7 +33512,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1792, + context: p2806, freeVariables: Identifiers{ "from", }, @@ -31984,7 +33588,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2816, freeVariables: nil, }, }, @@ -32006,7 +33610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2818, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -32248,7 +33852,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2842, freeVariables: Identifiers{ "base64_table", "i", @@ -32272,7 +33876,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2846, freeVariables: Identifiers{ "base64_table", "i", @@ -32292,7 
+33896,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2849, freeVariables: Identifiers{ "base64_table", }, @@ -32313,7 +33917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2852, freeVariables: Identifiers{ "i", }, @@ -32336,7 +33940,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p2855, freeVariables: Identifiers{ "i", }, @@ -32365,7 +33969,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2858, freeVariables: Identifiers{ "std", }, @@ -32384,7 +33988,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2861, freeVariables: Identifiers{ "std", }, @@ -32403,7 +34007,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p2864, freeVariables: Identifiers{ "std", }, @@ -32449,7 +34053,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2869, freeVariables: nil, }, Value: float64(0), @@ -32469,14 +34073,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p2871, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -32532,7 +34136,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -32561,7 +34165,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "end", "index", @@ -32587,7 +34191,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1808, + context: p2881, freeVariables: Identifiers{ "end", "index", @@ -32635,7 +34239,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "indexable", }, @@ -32681,7 +34285,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "index", "std", @@ -32786,7 +34390,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "index", }, @@ -32807,7 +34411,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: nil, }, }, @@ -32831,7 +34435,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: nil, }, Value: float64(0), @@ -32851,7 +34455,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "index", }, @@ -32898,7 +34502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "end", "indexable", @@ -33004,7 +34608,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "end", }, @@ -33025,7 +34629,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: nil, }, }, @@ -33049,7 +34653,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "indexable", "std", @@ -33069,7 +34673,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "std", }, @@ -33088,7 +34692,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "std", }, @@ -33134,7 +34738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1855, + 
context: p2928, freeVariables: Identifiers{ "indexable", }, @@ -33161,7 +34765,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "end", }, @@ -33208,7 +34812,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "std", "step", @@ -33313,7 +34917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "step", }, @@ -33334,7 +34938,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: nil, }, }, @@ -33358,7 +34962,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: nil, }, Value: float64(1), @@ -33378,7 +34982,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "step", }, @@ -33425,7 +35029,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "indexable", "std", @@ -33445,7 +35049,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "std", }, @@ -33464,7 +35068,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "std", }, @@ -33510,7 +35114,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1886, + context: p2959, freeVariables: Identifiers{ "indexable", }, @@ -33562,7 +35166,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "indexable", "std", @@ -33582,7 +35186,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "std", }, @@ -33601,7 +35205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1813, + context: p2886, freeVariables: Identifiers{ "std", }, @@ -33647,7 +35251,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1898, + context: p2971, freeVariables: Identifiers{ "indexable", }, @@ -33681,7 +35285,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "indexable", "invar", @@ -33703,7 +35307,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33722,7 +35326,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33741,7 +35345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33760,7 +35364,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33779,7 +35383,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33824,7 +35428,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: float64(0), @@ -33846,7 +35450,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33865,7 +35469,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33884,7 +35488,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + 
context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33929,7 +35533,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: float64(0), @@ -33952,7 +35556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33971,7 +35575,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -33990,7 +35594,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -34035,7 +35639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: float64(0), @@ -34057,7 +35661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", "std", @@ -34162,7 +35766,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: "got [%s:%s:%s] but negative index, end, and steps are not supported", @@ -34183,7 +35787,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", }, @@ -34203,7 +35807,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1945, + context: p3018, freeVariables: Identifiers{ "invar", }, @@ -34222,7 +35826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1945, + context: p3018, freeVariables: Identifiers{ "invar", }, @@ -34266,7 +35870,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1945, + context: p3018, freeVariables: Identifiers{ "invar", }, @@ -34285,7 +35889,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1945, + context: p3018, freeVariables: Identifiers{ "invar", }, @@ -34329,7 +35933,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1945, + context: p3018, freeVariables: Identifiers{ "invar", }, @@ -34348,7 +35952,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1945, + context: p3018, freeVariables: Identifiers{ "invar", }, @@ -34402,7 +36006,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "indexable", "invar", @@ -34509,7 +36113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "step", }, @@ -34530,7 +36134,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: float64(0), @@ -34556,7 +36160,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "std", "step", @@ -34661,7 +36265,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: "got %s but step must be greater than 0", @@ -34682,7 +36286,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "step", }, @@ -34710,7 +36314,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "indexable", "invar", @@ -34731,7 +36335,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "indexable", "std", @@ -34857,7 +36461,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: 
Identifiers{ "indexable", "std", @@ -34877,7 +36481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "std", }, @@ -34896,7 +36500,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "std", }, @@ -34942,7 +36546,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2009, + context: p3082, freeVariables: Identifiers{ "indexable", }, @@ -34969,7 +36573,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: "string", @@ -35104,7 +36708,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "indexable", "std", @@ -35124,7 +36728,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "std", }, @@ -35143,7 +36747,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "std", }, @@ -35189,7 +36793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2031, + context: p3104, freeVariables: Identifiers{ "indexable", }, @@ -35216,7 +36820,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: "array", @@ -35245,7 +36849,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "indexable", "std", @@ -35350,7 +36954,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: nil, }, Value: "std.slice accepts a string or an array, but got: %s", @@ -35371,7 +36975,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "indexable", "std", @@ -35391,7 +36995,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "std", }, @@ -35410,7 +37014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "std", }, @@ -35456,7 +37060,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2054, + context: p3127, freeVariables: Identifiers{ "indexable", }, @@ -35490,7 +37094,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "invar", "std", @@ -35513,7 +37117,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2060, + context: p3133, freeVariables: Identifiers{ "build", "invar", @@ -35542,7 +37146,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "build", "cur", @@ -35565,7 +37169,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "cur", "invar", @@ -35585,7 +37189,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "cur", "invar", @@ -35605,7 +37209,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "cur", }, @@ -35627,7 +37231,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "invar", }, @@ -35646,7 +37250,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "invar", }, @@ -35692,7 +37296,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "cur", "invar", @@ -35712,7 +37316,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "cur", }, @@ -35734,7 +37338,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "invar", }, @@ -35753,7 +37357,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "invar", }, @@ -35799,7 +37403,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "slice", }, @@ -35820,7 +37424,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "build", "cur", @@ -35843,7 +37447,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2064, + context: p3137, freeVariables: Identifiers{ "build", }, @@ -35866,7 +37470,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "cur", "invar", @@ -35973,7 +37577,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "invar", }, @@ -35992,7 +37596,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "invar", }, @@ -36036,7 +37640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: nil, }, Value: "string", @@ -36063,7 +37667,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "cur", "invar", @@ -36084,7 +37688,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "slice", }, @@ -36106,7 +37710,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "cur", "invar", @@ -36126,7 +37730,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "invar", }, @@ -36145,7 +37749,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "invar", }, @@ -36189,7 +37793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "cur", }, @@ -36213,7 +37817,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "cur", "invar", @@ -36234,7 +37838,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "slice", }, @@ -36256,7 +37860,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "cur", "invar", @@ -36277,7 +37881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2131, + context: p3204, freeVariables: Identifiers{ "cur", "invar", @@ -36297,7 +37901,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2131, + context: p3204, freeVariables: Identifiers{ "invar", }, @@ -36316,7 +37920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2131, + context: p3204, freeVariables: Identifiers{ "invar", }, @@ -36360,7 +37964,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2131, + context: p3204, freeVariables: Identifiers{ "cur", }, @@ -36388,7 +37992,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "cur", "invar", @@ -36408,7 +38012,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "cur", }, @@ -36430,7 +38034,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "invar", }, @@ -36449,7 +38053,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2094, + context: p3167, freeVariables: Identifiers{ "invar", }, @@ -36505,7 +38109,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "build", "invar", @@ -36526,7 +38130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p1804, + context: p2877, freeVariables: Identifiers{ "build", }, @@ -36549,7 +38153,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2155, + context: p3228, freeVariables: Identifiers{ "invar", "std", @@ -36654,7 +38258,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2155, + context: p3228, freeVariables: Identifiers{ "invar", }, @@ -36673,7 +38277,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2155, + context: p3228, freeVariables: Identifiers{ "invar", }, @@ -36717,7 +38321,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2155, + context: p3228, freeVariables: nil, }, Value: "string", @@ -36744,7 +38348,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2155, + context: p3228, freeVariables: nil, }, Value: "", @@ -36765,7 +38369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2155, + context: p3228, freeVariables: nil, }, Elements: nil, @@ -36786,7 +38390,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2155, + context: p3228, freeVariables: Identifiers{ "invar", }, @@ -36805,7 +38409,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2155, + context: p3228, freeVariables: Identifiers{ "invar", }, @@ -36907,7 +38511,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3255, freeVariables: nil, }, }, @@ -36929,7 +38533,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3257, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -37171,7 +38775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3281, freeVariables: Identifiers{ "base64_table", "i", @@ -37195,7 +38799,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3285, freeVariables: Identifiers{ "base64_table", "i", @@ -37215,7 +38819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3288, freeVariables: Identifiers{ "base64_table", }, @@ -37236,7 +38840,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3291, freeVariables: Identifiers{ "i", }, @@ -37259,7 +38863,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p3294, freeVariables: Identifiers{ "i", }, @@ -37288,7 +38892,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3297, freeVariables: Identifiers{ "std", }, @@ -37307,7 +38911,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3300, freeVariables: Identifiers{ "std", }, @@ -37326,7 +38930,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3303, freeVariables: Identifiers{ "std", }, @@ -37372,7 +38976,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p3308, freeVariables: nil, }, Value: float64(0), @@ -37392,14 +38996,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p3310, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -37455,7 +39059,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -37482,7 +39086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2184, + context: p3316, freeVariables: Identifiers{ "arr", "std", @@ -37503,7 +39107,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2184, + context: p3316, freeVariables: Identifiers{ "std", }, @@ -37522,7 +39126,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2184, + context: p3316, freeVariables: Identifiers{ "std", }, @@ -37568,7 +39172,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2193, + context: p3325, freeVariables: Identifiers{ "arr", "std", @@ -37589,7 +39193,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2193, + context: p3325, freeVariables: Identifiers{ "std", }, @@ -37608,7 +39212,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2193, + context: p3325, freeVariables: Identifiers{ "std", }, @@ -37654,7 +39258,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2202, + context: p3334, freeVariables: Identifiers{ "std", "x", @@ -37767,7 +39371,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2214, + context: p3346, freeVariables: Identifiers{ "v", }, @@ -37788,7 +39392,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2214, + context: p3346, freeVariables: Identifiers{ "x", }, @@ -37816,7 +39420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2202, + context: p3334, freeVariables: Identifiers{ "arr", }, @@ -37896,7 +39500,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3356, freeVariables: nil, }, }, @@ -37918,7 +39522,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3358, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -38160,7 +39764,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3382, freeVariables: Identifiers{ "base64_table", "i", @@ -38184,7 +39788,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3386, freeVariables: Identifiers{ "base64_table", "i", @@ -38204,7 +39808,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3389, freeVariables: Identifiers{ "base64_table", }, @@ -38225,7 +39829,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3392, freeVariables: Identifiers{ "i", }, @@ -38248,7 +39852,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p3395, freeVariables: Identifiers{ "i", }, @@ -38277,7 +39881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3398, freeVariables: Identifiers{ "std", }, @@ -38296,7 +39900,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3401, freeVariables: Identifiers{ "std", }, @@ -38315,7 +39919,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3404, freeVariables: Identifiers{ "std", }, @@ -38361,7 +39965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p54, + context: p3409, freeVariables: nil, }, Value: float64(0), @@ -38381,14 +39985,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p3411, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -38444,7 +40048,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -38471,7 +40075,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "b", @@ -38492,7 +40096,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "b", @@ -38598,7 +40202,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "std", @@ -38618,7 +40222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -38637,7 +40241,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -38683,7 +40287,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2247, + context: p3438, freeVariables: Identifiers{ "a", }, @@ -38710,7 +40314,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: nil, }, Value: "number", @@ -38823,7 +40427,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "b", "std", @@ -38843,7 +40447,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -38862,7 +40466,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -38908,7 +40512,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2267, + context: p3458, freeVariables: Identifiers{ "b", }, @@ -38935,7 +40539,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: nil, }, Value: "number", @@ -38963,7 +40567,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "b", @@ -38984,7 +40588,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39003,7 +40607,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39049,7 +40653,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2279, + context: p3470, freeVariables: Identifiers{ "a", }, @@ -39070,7 +40674,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2279, + context: p3470, freeVariables: Identifiers{ "b", }, @@ -39097,7 +40701,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "b", @@ -39203,7 +40807,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "std", @@ -39223,7 +40827,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39242,7 +40846,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39288,7 +40892,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p2302, + context: p3493, freeVariables: Identifiers{ "a", }, @@ -39315,7 +40919,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: nil, }, Value: "string", @@ -39342,7 +40946,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "b", @@ -39363,7 +40967,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39382,7 +40986,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39428,7 +41032,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2314, + context: p3505, freeVariables: Identifiers{ "a", }, @@ -39449,7 +41053,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2314, + context: p3505, freeVariables: Identifiers{ "b", }, @@ -39476,7 +41080,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "b", @@ -39497,7 +41101,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "b", @@ -39518,7 +41122,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "b", @@ -39539,7 +41143,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "std", @@ -39559,7 +41163,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "std", @@ -39579,7 +41183,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: nil, }, Value: "Operator % cannot be used on types ", @@ -39601,7 +41205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "a", "std", @@ -39621,7 +41225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39640,7 +41244,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39686,7 +41290,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2338, + context: p3529, freeVariables: Identifiers{ "a", }, @@ -39715,7 +41319,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: nil, }, Value: " and ", @@ -39738,7 +41342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "b", "std", @@ -39758,7 +41362,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39777,7 +41381,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: Identifiers{ "std", }, @@ -39823,7 +41427,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2350, + context: p3541, freeVariables: Identifiers{ "b", }, @@ -39852,7 +41456,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2226, + context: p3417, freeVariables: nil, }, Value: ".", @@ -39924,7 +41528,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3548, freeVariables: nil, }, }, @@ -39946,7 +41550,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + 
context: p3550, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -40188,7 +41792,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3574, freeVariables: Identifiers{ "base64_table", "i", @@ -40212,7 +41816,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3578, freeVariables: Identifiers{ "base64_table", "i", @@ -40232,7 +41836,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3581, freeVariables: Identifiers{ "base64_table", }, @@ -40253,7 +41857,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3584, freeVariables: Identifiers{ "i", }, @@ -40276,7 +41880,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p3587, freeVariables: Identifiers{ "i", }, @@ -40305,7 +41909,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3590, freeVariables: Identifiers{ "std", }, @@ -40324,7 +41928,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3593, freeVariables: Identifiers{ "std", }, @@ -40343,7 +41947,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3596, freeVariables: Identifiers{ "std", }, @@ -40389,7 +41993,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p3601, freeVariables: nil, }, Value: float64(0), @@ -40409,14 +42013,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p3603, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -40472,7 +42076,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -40499,7 +42103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "func", @@ -40626,7 +42230,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "func", "std", @@ -40646,7 +42250,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -40665,7 +42269,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -40711,7 +42315,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2380, + context: p3630, freeVariables: Identifiers{ "func", }, @@ -40738,7 +42342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: nil, }, Value: "function", @@ -40766,7 +42370,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "func", "std", @@ -40786,7 +42390,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "func", "std", @@ -40806,7 +42410,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: nil, }, Value: "std.map first param must be function, got ", @@ -40828,7 +42432,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "func", "std", @@ -40848,7 +42452,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -40867,7 +42471,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -40913,7 +42517,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2397, + context: p3647, freeVariables: Identifiers{ "func", }, @@ -40942,7 +42546,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "func", @@ -40963,7 +42567,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "std", @@ -41089,7 +42693,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "std", @@ -41109,7 +42713,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -41128,7 +42732,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -41174,7 +42778,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2422, + context: p3672, freeVariables: Identifiers{ "arr", }, @@ -41201,7 +42805,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: nil, }, Value: "array", @@ -41336,7 +42940,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "std", @@ -41356,7 +42960,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -41375,7 +42979,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -41421,7 +43025,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2444, + context: p3694, freeVariables: Identifiers{ "arr", }, @@ -41448,7 +43052,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: nil, }, Value: "string", @@ -41477,7 +43081,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "std", @@ -41497,7 +43101,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "std", @@ -41517,7 +43121,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: nil, }, Value: "std.map second param must be array / string, got ", @@ -41539,7 +43143,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "std", @@ -41559,7 +43163,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -41578,7 +43182,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -41624,7 +43228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2461, + context: p3711, freeVariables: Identifiers{ "arr", }, @@ -41653,7 +43257,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "arr", "func", @@ -41674,7 +43278,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -41693,7 +43297,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2359, + context: p3609, freeVariables: Identifiers{ "std", }, @@ -41739,7 +43343,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p2472, + context: p3722, freeVariables: Identifiers{ "arr", "std", @@ -41759,7 +43363,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2472, + context: p3722, freeVariables: Identifiers{ "std", }, @@ -41778,7 +43382,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2472, + context: p3722, freeVariables: Identifiers{ "std", }, @@ -41824,7 +43428,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2481, + context: p3731, freeVariables: Identifiers{ "arr", }, @@ -41851,7 +43455,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2472, + context: p3722, freeVariables: Identifiers{ "arr", "func", @@ -41878,7 +43482,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2487, + context: p3737, freeVariables: Identifiers{ "arr", "func", @@ -41899,7 +43503,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2487, + context: p3737, freeVariables: Identifiers{ "func", }, @@ -41922,7 +43526,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2493, + context: p3743, freeVariables: Identifiers{ "arr", "i", @@ -41942,7 +43546,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2493, + context: p3743, freeVariables: Identifiers{ "arr", }, @@ -41963,7 +43567,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2493, + context: p3743, freeVariables: Identifiers{ "i", }, @@ -42048,7 +43652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3753, freeVariables: nil, }, }, @@ -42070,7 +43674,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3755, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -42312,7 +43916,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3779, freeVariables: Identifiers{ "base64_table", "i", @@ -42336,7 +43940,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3783, freeVariables: Identifiers{ "base64_table", "i", @@ -42356,7 +43960,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3786, freeVariables: Identifiers{ "base64_table", }, @@ -42377,7 +43981,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3789, freeVariables: Identifiers{ "i", }, @@ -42400,7 +44004,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p3792, freeVariables: Identifiers{ "i", }, @@ -42429,7 +44033,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3795, freeVariables: Identifiers{ "std", }, @@ -42448,7 +44052,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3798, freeVariables: Identifiers{ "std", }, @@ -42467,7 +44071,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3801, freeVariables: Identifiers{ "std", }, @@ -42513,7 +44117,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p3806, freeVariables: nil, }, Value: float64(0), @@ -42533,14 +44137,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p3808, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -42596,7 +44200,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -42623,7 +44227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "func", @@ -42750,7 +44354,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "func", "std", @@ -42770,7 +44374,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -42789,7 +44393,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -42835,7 +44439,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2526, + context: p3835, freeVariables: Identifiers{ "func", }, @@ -42862,7 +44466,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: nil, }, Value: "function", @@ -42890,7 +44494,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "func", "std", @@ -42910,7 +44514,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "func", "std", @@ -42930,7 +44534,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: nil, }, Value: "std.mapWithIndex first param must be function, got ", @@ -42952,7 +44556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "func", "std", @@ -42972,7 +44576,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -42991,7 +44595,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43037,7 +44641,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2543, + context: p3852, freeVariables: Identifiers{ "func", }, @@ -43066,7 +44670,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "func", @@ -43087,7 +44691,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "std", @@ -43213,7 +44817,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "std", @@ -43233,7 +44837,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43252,7 +44856,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43298,7 +44902,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2568, + context: p3877, freeVariables: Identifiers{ "arr", }, @@ -43325,7 +44929,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: nil, }, Value: "array", @@ -43460,7 +45064,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "std", @@ -43480,7 +45084,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43499,7 +45103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43545,7 +45149,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2590, + context: p3899, freeVariables: Identifiers{ "arr", }, @@ -43572,7 +45176,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: nil, }, Value: "string", @@ -43601,7 +45205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "std", @@ -43621,7 +45225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "std", @@ -43641,7 +45245,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: nil, }, Value: "std.mapWithIndex second param must be array, got ", @@ -43663,7 +45267,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "std", @@ -43683,7 +45287,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43702,7 +45306,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43748,7 +45352,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2607, + context: p3916, freeVariables: Identifiers{ "arr", }, @@ -43777,7 +45381,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "arr", "func", @@ -43798,7 +45402,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43817,7 +45421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2505, + context: p3814, freeVariables: Identifiers{ "std", }, @@ -43863,7 +45467,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2618, + context: p3927, freeVariables: Identifiers{ "arr", "std", @@ -43883,7 +45487,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2618, + context: p3927, freeVariables: Identifiers{ "std", }, @@ -43902,7 +45506,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2618, + context: p3927, freeVariables: Identifiers{ "std", }, @@ -43948,7 +45552,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2627, + context: p3936, freeVariables: Identifiers{ "arr", }, @@ -43975,7 +45579,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2618, + context: p3927, freeVariables: Identifiers{ "arr", "func", @@ -44002,7 +45606,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2633, + context: p3942, freeVariables: Identifiers{ "arr", "func", @@ -44023,7 +45627,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2633, + context: p3942, freeVariables: Identifiers{ "func", }, @@ -44046,7 +45650,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2639, + context: p3948, freeVariables: Identifiers{ "i", }, @@ -44067,7 +45671,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2639, + context: p3948, freeVariables: Identifiers{ "arr", "i", @@ -44087,7 +45691,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2639, + context: p3948, freeVariables: Identifiers{ "arr", }, @@ -44108,7 +45712,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2639, + context: p3948, freeVariables: Identifiers{ "i", }, @@ -44193,7 +45797,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3960, freeVariables: nil, }, }, @@ -44215,7 +45819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3962, freeVariables: nil, }, Value: 
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -44457,7 +46061,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3986, freeVariables: Identifiers{ "base64_table", "i", @@ -44481,7 +46085,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3990, freeVariables: Identifiers{ "base64_table", "i", @@ -44501,7 +46105,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3993, freeVariables: Identifiers{ "base64_table", }, @@ -44522,7 +46126,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p3996, freeVariables: Identifiers{ "i", }, @@ -44545,7 +46149,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p3999, freeVariables: Identifiers{ "i", }, @@ -44574,7 +46178,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4002, freeVariables: Identifiers{ "std", }, @@ -44593,7 +46197,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4005, freeVariables: Identifiers{ "std", }, @@ -44612,7 +46216,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4008, freeVariables: Identifiers{ "std", }, @@ -44658,7 +46262,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4013, freeVariables: nil, }, Value: float64(0), @@ -44678,14 +46282,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4015, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -44741,7 +46345,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -44768,7 +46372,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "func", "obj", @@ -44895,7 +46499,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "func", "std", @@ -44915,7 +46519,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "std", }, @@ -44934,7 +46538,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "std", }, @@ -44980,7 +46584,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2674, + context: p4042, freeVariables: Identifiers{ "func", }, @@ -45007,7 +46611,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: nil, }, Value: "function", @@ -45035,7 +46639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "func", "std", @@ -45055,7 +46659,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "func", "std", @@ -45075,7 +46679,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: nil, }, Value: "std.mapWithKey first param must be function, got ", @@ -45097,7 +46701,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "func", "std", @@ -45117,7 +46721,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "std", }, @@ -45136,7 +46740,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: 
p4021, freeVariables: Identifiers{ "std", }, @@ -45182,7 +46786,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2691, + context: p4059, freeVariables: Identifiers{ "func", }, @@ -45211,7 +46815,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "func", "obj", @@ -45338,7 +46942,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "obj", "std", @@ -45358,7 +46962,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "std", }, @@ -45377,7 +46981,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "std", }, @@ -45423,7 +47027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2714, + context: p4082, freeVariables: Identifiers{ "obj", }, @@ -45450,7 +47054,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: nil, }, Value: "object", @@ -45478,7 +47082,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "obj", "std", @@ -45498,7 +47102,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "obj", "std", @@ -45518,7 +47122,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: nil, }, Value: "std.mapWithKey second param must be object, got ", @@ -45540,7 +47144,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "obj", "std", @@ -45560,7 +47164,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "std", }, @@ -45579,7 +47183,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "std", }, @@ -45625,7 +47229,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2731, + context: p4099, freeVariables: Identifiers{ "obj", }, @@ -45875,7 +47479,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "func", "k", @@ -45900,7 +47504,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "k", }, @@ -45921,7 +47525,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2761, + context: p4129, freeVariables: Identifiers{ "func", "k", @@ -45942,7 +47546,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2761, + context: p4129, freeVariables: Identifiers{ "func", }, @@ -45965,7 +47569,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2767, + context: p4135, freeVariables: Identifiers{ "k", }, @@ -45986,7 +47590,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2767, + context: p4135, freeVariables: Identifiers{ "k", "obj", @@ -46006,7 +47610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2767, + context: p4135, freeVariables: Identifiers{ "obj", }, @@ -46027,7 +47631,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2767, + context: p4135, freeVariables: Identifiers{ "k", }, @@ -46064,7 +47668,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "obj", "std", @@ -46084,7 +47688,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: 
p4021, freeVariables: Identifiers{ "std", }, @@ -46103,7 +47707,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2653, + context: p4021, freeVariables: Identifiers{ "std", }, @@ -46149,7 +47753,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2784, + context: p4152, freeVariables: Identifiers{ "obj", }, @@ -46237,7 +47841,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4158, freeVariables: nil, }, }, @@ -46259,7 +47863,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4160, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -46501,7 +48105,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4184, freeVariables: Identifiers{ "base64_table", "i", @@ -46525,7 +48129,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4188, freeVariables: Identifiers{ "base64_table", "i", @@ -46545,7 +48149,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4191, freeVariables: Identifiers{ "base64_table", }, @@ -46566,7 +48170,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4194, freeVariables: Identifiers{ "i", }, @@ -46589,7 +48193,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p4197, freeVariables: Identifiers{ "i", }, @@ -46618,7 +48222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4200, freeVariables: Identifiers{ "std", }, @@ -46637,7 +48241,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4203, freeVariables: Identifiers{ "std", }, @@ -46656,7 +48260,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4206, freeVariables: Identifiers{ "std", }, @@ -46702,7 +48306,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4211, freeVariables: nil, }, Value: float64(0), @@ -46722,14 +48326,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4213, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -46785,7 +48389,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -46812,7 +48416,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "sep", @@ -46836,7 +48440,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2796, + context: p4223, freeVariables: Identifiers{ "aux", "sep", @@ -46867,7 +48471,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "aux", @@ -46892,7 +48496,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "i", @@ -46913,7 +48517,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "i", }, @@ -46935,7 +48539,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "std", @@ -46955,7 +48559,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "std", }, @@ -46974,7 +48578,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ 
"std", }, @@ -47020,7 +48624,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2815, + context: p4242, freeVariables: Identifiers{ "arr", }, @@ -47048,7 +48652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "running", }, @@ -47069,7 +48673,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "aux", @@ -47180,7 +48784,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "i", @@ -47200,7 +48804,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", }, @@ -47221,7 +48825,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "i", }, @@ -47244,7 +48848,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: nil, }, }, @@ -47268,7 +48872,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "aux", @@ -47291,7 +48895,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "aux", }, @@ -47314,7 +48918,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2842, + context: p4269, freeVariables: Identifiers{ "arr", }, @@ -47335,7 +48939,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2842, + context: p4269, freeVariables: Identifiers{ "i", }, @@ -47354,7 +48958,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2842, + context: p4269, freeVariables: Identifiers{ "i", }, @@ -47376,7 +48980,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2842, + context: p4269, freeVariables: nil, }, Value: float64(1), @@ -47397,7 +49001,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2842, + context: p4269, freeVariables: Identifiers{ "first", }, @@ -47418,7 +49022,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2842, + context: p4269, freeVariables: Identifiers{ "running", }, @@ -47445,7 +49049,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "aux", @@ -47580,7 +49184,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "i", @@ -47601,7 +49205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "std", }, @@ -47620,7 +49224,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "std", }, @@ -47666,7 +49270,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2874, + context: p4301, freeVariables: Identifiers{ "arr", "i", @@ -47686,7 +49290,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2874, + context: p4301, freeVariables: Identifiers{ "arr", }, @@ -47707,7 +49311,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2874, + context: p4301, freeVariables: Identifiers{ "i", }, @@ -47736,7 +49340,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "sep", "std", @@ -47756,7 +49360,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "std", }, @@ -47775,7 +49379,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "std", }, @@ -47821,7 +49425,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2889, + context: p4316, freeVariables: Identifiers{ "sep", }, @@ -47855,7 +49459,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "i", @@ -47964,7 +49568,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: nil, }, Value: "expected %s but arr[%d] was %s ", @@ -47985,7 +49589,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "i", @@ -48008,7 +49612,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2906, + context: p4333, freeVariables: Identifiers{ "sep", "std", @@ -48028,7 +49632,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2906, + context: p4333, freeVariables: Identifiers{ "std", }, @@ -48047,7 +49651,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2906, + context: p4333, freeVariables: Identifiers{ "std", }, @@ -48093,7 +49697,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2915, + context: p4342, freeVariables: Identifiers{ "sep", }, @@ -48120,7 +49724,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2906, + context: p4333, freeVariables: Identifiers{ "i", }, @@ -48141,7 +49745,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2906, + context: p4333, freeVariables: Identifiers{ "arr", "i", @@ -48162,7 +49766,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2906, + context: p4333, freeVariables: Identifiers{ "std", }, @@ -48181,7 +49785,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2906, + context: p4333, freeVariables: Identifiers{ "std", }, @@ -48227,7 +49831,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2928, + context: p4355, freeVariables: Identifiers{ "arr", "i", @@ -48247,7 +49851,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2928, + context: p4355, freeVariables: Identifiers{ "arr", }, @@ -48268,7 +49872,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2928, + context: p4355, freeVariables: Identifiers{ "i", }, @@ -48307,7 +49911,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "aux", @@ -48331,7 +49935,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "first", }, @@ -48352,7 +49956,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "aux", @@ -48374,7 +49978,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "aux", }, @@ -48397,7 +50001,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: Identifiers{ "arr", }, @@ -48418,7 +50022,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: Identifiers{ "i", }, @@ -48437,7 +50041,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: Identifiers{ "i", }, @@ -48459,7 +50063,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: nil, }, Value: float64(1), @@ -48480,7 +50084,7 @@ var StdAst = &DesugaredObject{ }, file: 
p1, }, - context: p2944, + context: p4371, freeVariables: nil, }, Value: false, @@ -48499,7 +50103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: Identifiers{ "arr", "i", @@ -48520,7 +50124,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: Identifiers{ "running", }, @@ -48542,7 +50146,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: Identifiers{ "arr", "i", @@ -48562,7 +50166,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: Identifiers{ "arr", }, @@ -48583,7 +50187,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2944, + context: p4371, freeVariables: Identifiers{ "i", }, @@ -48613,7 +50217,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "arr", "aux", @@ -48636,7 +50240,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2800, + context: p4227, freeVariables: Identifiers{ "aux", }, @@ -48659,7 +50263,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "arr", }, @@ -48680,7 +50284,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "i", }, @@ -48699,7 +50303,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "i", }, @@ -48721,7 +50325,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: nil, }, Value: float64(1), @@ -48742,7 +50346,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: nil, }, Value: false, @@ -48761,7 +50365,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "arr", "i", @@ -48783,7 +50387,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "running", "sep", @@ -48803,7 +50407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "running", }, @@ -48825,7 +50429,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "sep", }, @@ -48848,7 +50452,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "arr", "i", @@ -48868,7 +50472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "arr", }, @@ -48889,7 +50493,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2968, + context: p4395, freeVariables: Identifiers{ "i", }, @@ -48927,7 +50531,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "aux", @@ -49055,7 +50659,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "std", @@ -49075,7 +50679,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -49094,7 +50698,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -49140,7 +50744,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3011, + context: p4438, 
freeVariables: Identifiers{ "arr", }, @@ -49167,7 +50771,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: nil, }, Value: "array", @@ -49195,7 +50799,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "std", @@ -49215,7 +50819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "std", @@ -49235,7 +50839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: nil, }, Value: "join second parameter should be array, got ", @@ -49257,7 +50861,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "std", @@ -49277,7 +50881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -49296,7 +50900,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -49342,7 +50946,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3028, + context: p4455, freeVariables: Identifiers{ "arr", }, @@ -49371,7 +50975,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "aux", @@ -49478,7 +51082,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "sep", "std", @@ -49498,7 +51102,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -49517,7 +51121,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -49563,7 +51167,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3049, + context: p4476, freeVariables: Identifiers{ "sep", }, @@ -49590,7 +51194,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: nil, }, Value: "string", @@ -49617,7 +51221,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "aux", @@ -49637,7 +51241,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "aux", }, @@ -49660,7 +51264,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3058, + context: p4485, freeVariables: Identifiers{ "arr", }, @@ -49681,7 +51285,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3058, + context: p4485, freeVariables: nil, }, Value: float64(0), @@ -49701,7 +51305,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3058, + context: p4485, freeVariables: nil, }, Value: true, @@ -49720,7 +51324,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3058, + context: p4485, freeVariables: nil, }, Value: "", @@ -49747,7 +51351,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "aux", @@ -49854,7 +51458,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "sep", "std", @@ -49874,7 +51478,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -49893,7 +51497,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, 
freeVariables: Identifiers{ "std", }, @@ -49939,7 +51543,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3082, + context: p4509, freeVariables: Identifiers{ "sep", }, @@ -49966,7 +51570,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: nil, }, Value: "array", @@ -49993,7 +51597,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "arr", "aux", @@ -50013,7 +51617,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "aux", }, @@ -50036,7 +51640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3091, + context: p4518, freeVariables: Identifiers{ "arr", }, @@ -50057,7 +51661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3091, + context: p4518, freeVariables: nil, }, Value: float64(0), @@ -50077,7 +51681,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3091, + context: p4518, freeVariables: nil, }, Value: true, @@ -50096,7 +51700,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3091, + context: p4518, freeVariables: nil, }, Elements: nil, @@ -50122,7 +51726,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "sep", "std", @@ -50142,7 +51746,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "sep", "std", @@ -50162,7 +51766,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: nil, }, Value: "join first parameter should be string or array, got ", @@ -50184,7 +51788,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "sep", "std", @@ -50204,7 +51808,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -50223,7 +51827,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p2792, + context: p4219, freeVariables: Identifiers{ "std", }, @@ -50269,7 +51873,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3110, + context: p4537, freeVariables: Identifiers{ "sep", }, @@ -50349,7 +51953,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4543, freeVariables: nil, }, }, @@ -50371,7 +51975,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4545, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -50613,7 +52217,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4569, freeVariables: Identifiers{ "base64_table", "i", @@ -50637,7 +52241,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4573, freeVariables: Identifiers{ "base64_table", "i", @@ -50657,7 +52261,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4576, freeVariables: Identifiers{ "base64_table", }, @@ -50678,7 +52282,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4579, freeVariables: Identifiers{ "i", }, @@ -50701,7 +52305,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p4582, freeVariables: Identifiers{ "i", }, @@ -50730,7 +52334,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4585, freeVariables: Identifiers{ "std", }, @@ -50749,7 +52353,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p9, + context: p4588, freeVariables: Identifiers{ "std", }, @@ -50768,7 +52372,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4591, freeVariables: Identifiers{ "std", }, @@ -50814,7 +52418,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4596, freeVariables: nil, }, Value: float64(0), @@ -50834,14 +52438,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4598, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -50897,7 +52501,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -50923,7 +52527,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3118, + context: p4604, freeVariables: Identifiers{ "arr", "std", @@ -50943,7 +52547,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3118, + context: p4604, freeVariables: Identifiers{ "std", }, @@ -50962,7 +52566,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3118, + context: p4604, freeVariables: Identifiers{ "std", }, @@ -51008,7 +52612,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3127, + context: p4613, freeVariables: nil, }, Value: "\n", @@ -51029,7 +52633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3127, + context: p4613, freeVariables: Identifiers{ "arr", }, @@ -51048,7 +52652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3127, + context: p4613, freeVariables: Identifiers{ "arr", }, @@ -51070,7 +52674,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3127, + context: p4613, freeVariables: nil, }, Elements: Nodes{ @@ -51088,7 +52692,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3135, + context: p4621, freeVariables: nil, }, Value: "", @@ -51166,7 +52770,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4626, freeVariables: nil, }, }, @@ -51188,7 +52792,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4628, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -51430,7 +53034,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4652, freeVariables: Identifiers{ "base64_table", "i", @@ -51454,7 +53058,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4656, freeVariables: Identifiers{ "base64_table", "i", @@ -51474,7 +53078,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4659, freeVariables: Identifiers{ "base64_table", }, @@ -51495,7 +53099,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4662, freeVariables: Identifiers{ "i", }, @@ -51518,7 +53122,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p4665, freeVariables: Identifiers{ "i", }, @@ -51547,7 +53151,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4668, freeVariables: Identifiers{ "std", }, @@ -51566,7 +53170,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4671, freeVariables: Identifiers{ "std", }, @@ -51585,7 +53189,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4674, freeVariables: Identifiers{ "std", }, @@ -51631,7 +53235,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4679, freeVariables: 
nil, }, Value: float64(0), @@ -51651,14 +53255,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4681, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -51714,7 +53318,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -51740,7 +53344,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "arr", "std", @@ -51760,7 +53364,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "arr", "std", @@ -51780,7 +53384,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "std", }, @@ -51799,7 +53403,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "std", }, @@ -51845,7 +53449,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3153, + context: p4698, freeVariables: Identifiers{ "arr", }, @@ -51872,7 +53476,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "arr", }, @@ -51893,7 +53497,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "arr", "std", @@ -51913,7 +53517,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "arr", "std", @@ -51933,7 +53537,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "std", }, @@ -51952,7 +53556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "std", }, @@ -51998,7 +53602,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3168, + context: p4713, freeVariables: Identifiers{ "arr", }, @@ -52025,7 +53629,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "arr", "std", @@ -52045,7 +53649,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "std", }, @@ -52064,7 +53668,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "std", }, @@ -52110,7 +53714,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3179, + context: p4724, freeVariables: nil, }, Value: "", @@ -52263,7 +53867,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3195, + context: p4740, freeVariables: Identifiers{ "std", "x", @@ -52283,7 +53887,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3195, + context: p4740, freeVariables: Identifiers{ "std", }, @@ -52302,7 +53906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3195, + context: p4740, freeVariables: Identifiers{ "std", }, @@ -52348,7 +53952,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3204, + context: p4749, freeVariables: Identifiers{ "x", }, @@ -52379,7 +53983,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3179, + context: p4724, freeVariables: Identifiers{ "arr", }, @@ -52412,7 +54016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "arr", "std", @@ -52517,7 +54121,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p3142, + context: p4687, freeVariables: nil, }, Value: "Expected string or array, got %s", @@ -52538,7 +54142,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "arr", "std", @@ -52558,7 +54162,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "std", }, @@ -52577,7 +54181,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3142, + context: p4687, freeVariables: Identifiers{ "std", }, @@ -52623,7 +54227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3228, + context: p4773, freeVariables: Identifiers{ "arr", }, @@ -52706,7 +54310,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4779, freeVariables: nil, }, }, @@ -52728,7 +54332,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4781, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -52970,7 +54574,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4805, freeVariables: Identifiers{ "base64_table", "i", @@ -52994,7 +54598,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4809, freeVariables: Identifiers{ "base64_table", "i", @@ -53014,7 +54618,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4812, freeVariables: Identifiers{ "base64_table", }, @@ -53035,7 +54639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4815, freeVariables: Identifiers{ "i", }, @@ -53058,7 +54662,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p4818, freeVariables: Identifiers{ "i", }, @@ -53087,7 +54691,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4821, freeVariables: Identifiers{ "std", }, @@ -53106,7 +54710,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4824, freeVariables: Identifiers{ "std", }, @@ -53125,7 +54729,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p4827, freeVariables: Identifiers{ "std", }, @@ -53171,7 +54775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4832, freeVariables: nil, }, Value: float64(0), @@ -53191,14 +54795,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p4834, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -53254,7 +54858,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -53281,7 +54885,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", "str", @@ -53305,7 +54909,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3240, + context: p4844, freeVariables: Identifiers{ "std", }, @@ -53332,7 +54936,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "i", "std", @@ -53353,7 +54957,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "i", "std", @@ -53374,7 +54978,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "i", }, @@ -53396,7 +55000,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p3244, + context: p4848, freeVariables: Identifiers{ "std", "str", @@ -53416,7 +55020,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "std", }, @@ -53435,7 +55039,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "std", }, @@ -53481,7 +55085,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3259, + context: p4863, freeVariables: Identifiers{ "str", }, @@ -53509,7 +55113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: nil, }, Expr: &LiteralString{ @@ -53526,7 +55130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: nil, }, Value: "Truncated format code.", @@ -53548,7 +55152,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "i", "std", @@ -53572,7 +55176,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3267, + context: p4871, freeVariables: Identifiers{ "i", "str", @@ -53592,7 +55196,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3267, + context: p4871, freeVariables: Identifiers{ "str", }, @@ -53613,7 +55217,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3267, + context: p4871, freeVariables: Identifiers{ "i", }, @@ -53639,7 +55243,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "c", "i", @@ -53746,7 +55350,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "c", }, @@ -53767,7 +55371,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: nil, }, Value: "(", @@ -53794,7 +55398,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "i", "std", @@ -53818,7 +55422,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3290, + context: p4894, freeVariables: Identifiers{ "consume", "std", @@ -53847,7 +55451,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "consume", "j", @@ -53870,7 +55474,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "j", "std", @@ -53891,7 +55495,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "j", }, @@ -53913,7 +55517,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "std", "str", @@ -53933,7 +55537,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "std", }, @@ -53952,7 +55556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "std", }, @@ -53998,7 +55602,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3309, + context: p4913, freeVariables: Identifiers{ "str", }, @@ -54026,7 +55630,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: nil, }, Expr: &LiteralString{ @@ -54043,7 +55647,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: nil, }, Value: "Truncated format code.", @@ -54065,7 +55669,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p3294, + context: p4898, freeVariables: Identifiers{ "consume", "j", @@ -54091,7 +55695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3317, + context: p4921, freeVariables: Identifiers{ "j", "str", @@ -54111,7 +55715,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3317, + context: p4921, freeVariables: Identifiers{ "str", }, @@ -54132,7 +55736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3317, + context: p4921, freeVariables: Identifiers{ "j", }, @@ -54158,7 +55762,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "c", "consume", @@ -54288,7 +55892,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "c", }, @@ -54309,7 +55913,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: nil, }, Value: ")", @@ -54337,7 +55941,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "c", "consume", @@ -54360,7 +55964,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "consume", }, @@ -54383,7 +55987,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3344, + context: p4948, freeVariables: Identifiers{ "str", }, @@ -54404,7 +56008,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3344, + context: p4948, freeVariables: Identifiers{ "j", }, @@ -54423,7 +56027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3344, + context: p4948, freeVariables: Identifiers{ "j", }, @@ -54445,7 +56049,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3344, + context: p4948, freeVariables: nil, }, Value: float64(1), @@ -54466,7 +56070,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3344, + context: p4948, freeVariables: Identifiers{ "c", "v", @@ -54486,7 +56090,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3344, + context: p4948, freeVariables: Identifiers{ "v", }, @@ -54508,7 +56112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3344, + context: p4948, freeVariables: Identifiers{ "c", }, @@ -54536,7 +56140,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3294, + context: p4898, freeVariables: Identifiers{ "j", "v", @@ -54581,7 +56185,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3362, + context: p4966, freeVariables: Identifiers{ "j", }, @@ -54600,7 +56204,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3362, + context: p4966, freeVariables: Identifiers{ "j", }, @@ -54622,7 +56226,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3362, + context: p4966, freeVariables: nil, }, Value: float64(1), @@ -54668,7 +56272,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3362, + context: p4966, freeVariables: Identifiers{ "v", }, @@ -54700,7 +56304,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "consume", "i", @@ -54721,7 +56325,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "consume", }, @@ -54744,7 +56348,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3376, + context: p4980, freeVariables: Identifiers{ "str", }, @@ -54765,7 +56369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3376, + context: p4980, freeVariables: 
Identifiers{ "i", }, @@ -54784,7 +56388,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3376, + context: p4980, freeVariables: Identifiers{ "i", }, @@ -54806,7 +56410,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3376, + context: p4980, freeVariables: nil, }, Value: float64(1), @@ -54827,7 +56431,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3376, + context: p4980, freeVariables: nil, }, Value: "", @@ -54855,7 +56459,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3244, + context: p4848, freeVariables: Identifiers{ "i", }, @@ -54899,7 +56503,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3389, + context: p4993, freeVariables: Identifiers{ "i", }, @@ -54945,7 +56549,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3389, + context: p4993, freeVariables: nil, }, }, @@ -54974,7 +56578,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", "str", @@ -54999,7 +56603,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3397, + context: p5001, freeVariables: Identifiers{ "std", }, @@ -55026,7 +56630,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3401, + context: p5005, freeVariables: Identifiers{ "i", "std", @@ -55050,7 +56654,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3405, + context: p5009, freeVariables: Identifiers{ "consume", "std", @@ -55079,7 +56683,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", "j", @@ -55102,7 +56706,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "j", "std", @@ -55123,7 +56727,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "j", }, @@ -55145,7 +56749,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "std", "str", @@ -55165,7 +56769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "std", }, @@ -55184,7 +56788,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "std", }, @@ -55230,7 +56834,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3424, + context: p5028, freeVariables: Identifiers{ "str", }, @@ -55258,7 +56862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: nil, }, Expr: &LiteralString{ @@ -55275,7 +56879,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: nil, }, Value: "Truncated format code.", @@ -55297,7 +56901,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", "j", @@ -55323,7 +56927,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3432, + context: p5036, freeVariables: Identifiers{ "j", "str", @@ -55343,7 +56947,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3432, + context: p5036, freeVariables: Identifiers{ "str", }, @@ -55364,7 +56968,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3432, + context: p5036, freeVariables: Identifiers{ "j", }, @@ -55390,7 +56994,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", "consume", @@ 
-55499,7 +57103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", }, @@ -55520,7 +57124,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: nil, }, Value: "#", @@ -55547,7 +57151,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", "j", @@ -55569,7 +57173,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", }, @@ -55592,7 +57196,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3457, + context: p5061, freeVariables: Identifiers{ "str", }, @@ -55613,7 +57217,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3457, + context: p5061, freeVariables: Identifiers{ "j", }, @@ -55632,7 +57236,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3457, + context: p5061, freeVariables: Identifiers{ "j", }, @@ -55654,7 +57258,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3457, + context: p5061, freeVariables: nil, }, Value: float64(1), @@ -55675,7 +57279,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3457, + context: p5061, freeVariables: Identifiers{ "v", }, @@ -55694,7 +57298,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3457, + context: p5061, freeVariables: Identifiers{ "v", }, @@ -55716,7 +57320,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3457, + context: p5061, freeVariables: nil, }, Asserts: nil, @@ -55758,7 +57362,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3472, + context: p5076, freeVariables: nil, }, Value: true, @@ -55788,7 +57392,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", "consume", @@ -55897,7 +57501,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", }, @@ -55918,7 +57522,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: nil, }, Value: "0", @@ -55945,7 +57549,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", "j", @@ -55967,7 +57571,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", }, @@ -55990,7 +57594,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3492, + context: p5096, freeVariables: Identifiers{ "str", }, @@ -56011,7 +57615,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3492, + context: p5096, freeVariables: Identifiers{ "j", }, @@ -56030,7 +57634,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3492, + context: p5096, freeVariables: Identifiers{ "j", }, @@ -56052,7 +57656,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3492, + context: p5096, freeVariables: nil, }, Value: float64(1), @@ -56073,7 +57677,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3492, + context: p5096, freeVariables: Identifiers{ "v", }, @@ -56092,7 +57696,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3492, + context: p5096, freeVariables: Identifiers{ "v", }, @@ -56114,7 +57718,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3492, + context: p5096, freeVariables: nil, }, Asserts: nil, @@ -56156,7 +57760,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p3507, + context: p5111, freeVariables: nil, }, Value: true, @@ -56186,7 +57790,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", "consume", @@ -56295,7 +57899,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", }, @@ -56316,7 +57920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: nil, }, Value: "-", @@ -56343,7 +57947,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", "j", @@ -56365,7 +57969,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", }, @@ -56388,7 +57992,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3527, + context: p5131, freeVariables: Identifiers{ "str", }, @@ -56409,7 +58013,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3527, + context: p5131, freeVariables: Identifiers{ "j", }, @@ -56428,7 +58032,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3527, + context: p5131, freeVariables: Identifiers{ "j", }, @@ -56450,7 +58054,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3527, + context: p5131, freeVariables: nil, }, Value: float64(1), @@ -56471,7 +58075,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3527, + context: p5131, freeVariables: Identifiers{ "v", }, @@ -56490,7 +58094,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3527, + context: p5131, freeVariables: Identifiers{ "v", }, @@ -56512,7 +58116,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3527, + context: p5131, freeVariables: nil, }, Asserts: nil, @@ -56554,7 +58158,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3542, + context: p5146, freeVariables: nil, }, Value: true, @@ -56584,7 +58188,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", "consume", @@ -56693,7 +58297,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", }, @@ -56714,7 +58318,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: nil, }, Value: " ", @@ -56741,7 +58345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", "j", @@ -56763,7 +58367,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", }, @@ -56786,7 +58390,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3562, + context: p5166, freeVariables: Identifiers{ "str", }, @@ -56807,7 +58411,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3562, + context: p5166, freeVariables: Identifiers{ "j", }, @@ -56826,7 +58430,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3562, + context: p5166, freeVariables: Identifiers{ "j", }, @@ -56848,7 +58452,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3562, + context: p5166, freeVariables: nil, }, Value: float64(1), @@ -56869,7 +58473,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3562, + context: p5166, freeVariables: Identifiers{ "v", }, @@ -56888,7 +58492,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3562, + context: p5166, freeVariables: Identifiers{ 
"v", }, @@ -56910,7 +58514,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3562, + context: p5166, freeVariables: nil, }, Asserts: nil, @@ -56952,7 +58556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3577, + context: p5181, freeVariables: nil, }, Value: true, @@ -56982,7 +58586,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", "consume", @@ -57091,7 +58695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "c", }, @@ -57112,7 +58716,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: nil, }, Value: "+", @@ -57139,7 +58743,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", "j", @@ -57161,7 +58765,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "consume", }, @@ -57184,7 +58788,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3597, + context: p5201, freeVariables: Identifiers{ "str", }, @@ -57205,7 +58809,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3597, + context: p5201, freeVariables: Identifiers{ "j", }, @@ -57224,7 +58828,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3597, + context: p5201, freeVariables: Identifiers{ "j", }, @@ -57246,7 +58850,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3597, + context: p5201, freeVariables: nil, }, Value: float64(1), @@ -57267,7 +58871,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3597, + context: p5201, freeVariables: Identifiers{ "v", }, @@ -57286,7 +58890,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3597, + context: p5201, freeVariables: Identifiers{ "v", }, @@ -57308,7 +58912,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3597, + context: p5201, freeVariables: nil, }, Asserts: nil, @@ -57350,7 +58954,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3612, + context: p5216, freeVariables: nil, }, Value: true, @@ -57380,7 +58984,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3409, + context: p5013, freeVariables: Identifiers{ "j", "v", @@ -57425,7 +59029,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3618, + context: p5222, freeVariables: Identifiers{ "j", }, @@ -57471,7 +59075,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3618, + context: p5222, freeVariables: Identifiers{ "v", }, @@ -57507,7 +59111,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3401, + context: p5005, freeVariables: Identifiers{ "consume", "i", @@ -57528,7 +59132,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3401, + context: p5005, freeVariables: Identifiers{ "consume", }, @@ -57551,7 +59155,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3629, + context: p5233, freeVariables: Identifiers{ "str", }, @@ -57572,7 +59176,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3629, + context: p5233, freeVariables: Identifiers{ "i", }, @@ -57593,7 +59197,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3629, + context: p5233, freeVariables: nil, }, Asserts: nil, @@ -57635,7 +59239,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3637, + context: p5241, freeVariables: nil, }, Value: false, @@ -57679,7 +59283,7 @@ var StdAst = &DesugaredObject{ }, file: 
p1, }, - context: p3637, + context: p5241, freeVariables: nil, }, Value: false, @@ -57723,7 +59327,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3637, + context: p5241, freeVariables: nil, }, Value: false, @@ -57767,7 +59371,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3637, + context: p5241, freeVariables: nil, }, Value: false, @@ -57811,7 +59415,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3637, + context: p5241, freeVariables: nil, }, Value: false, @@ -57845,7 +59449,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", "str", @@ -57871,7 +59475,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3650, + context: p5254, freeVariables: Identifiers{ "std", }, @@ -57898,7 +59502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "i", "std", @@ -57919,7 +59523,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "i", "std", @@ -57940,7 +59544,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "i", "std", @@ -57961,7 +59565,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "i", }, @@ -57983,7 +59587,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "std", "str", @@ -58003,7 +59607,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "std", }, @@ -58022,7 +59626,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "std", }, @@ -58068,7 +59672,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3671, + context: p5275, freeVariables: Identifiers{ "str", }, @@ -58183,7 +59787,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "i", "str", @@ -58203,7 +59807,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "str", }, @@ -58224,7 +59828,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "i", }, @@ -58247,7 +59851,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: nil, }, Value: "*", @@ -58275,7 +59879,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "i", }, @@ -58319,7 +59923,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3693, + context: p5297, freeVariables: Identifiers{ "i", }, @@ -58338,7 +59942,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3693, + context: p5297, freeVariables: Identifiers{ "i", }, @@ -58360,7 +59964,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3693, + context: p5297, freeVariables: nil, }, Value: float64(1), @@ -58406,7 +60010,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3693, + context: p5297, freeVariables: nil, }, Value: "*", @@ -58431,7 +60035,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "i", "std", @@ -58455,7 +60059,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3704, + context: p5308, freeVariables: Identifiers{ 
"consume", "std", @@ -58484,7 +60088,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -58507,7 +60111,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "j", "std", @@ -58528,7 +60132,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "j", }, @@ -58550,7 +60154,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "std", "str", @@ -58570,7 +60174,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "std", }, @@ -58589,7 +60193,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "std", }, @@ -58635,7 +60239,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3723, + context: p5327, freeVariables: Identifiers{ "str", }, @@ -58663,7 +60267,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Expr: &LiteralString{ @@ -58680,7 +60284,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "Truncated format code.", @@ -58702,7 +60306,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -58728,7 +60332,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3731, + context: p5335, freeVariables: Identifiers{ "j", "str", @@ -58748,7 +60352,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3731, + context: p5335, freeVariables: Identifiers{ "str", }, @@ -58769,7 +60373,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3731, + context: p5335, freeVariables: Identifiers{ "j", }, @@ -58795,7 +60399,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -58904,7 +60508,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -58925,7 +60529,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "0", @@ -58952,7 +60556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -58974,7 +60578,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -58997,7 +60601,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: Identifiers{ "str", }, @@ -59018,7 +60622,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: Identifiers{ "j", }, @@ -59037,7 +60641,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: Identifiers{ "j", }, @@ -59059,7 +60663,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: nil, }, Value: float64(1), @@ -59080,7 +60684,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: Identifiers{ "v", }, @@ -59099,7 +60703,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: Identifiers{ "v", }, @@ 
-59118,7 +60722,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: Identifiers{ "v", }, @@ -59140,7 +60744,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: nil, }, Value: float64(10), @@ -59162,7 +60766,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3756, + context: p5360, freeVariables: nil, }, Value: float64(0), @@ -59189,7 +60793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -59298,7 +60902,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -59319,7 +60923,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "1", @@ -59346,7 +60950,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -59368,7 +60972,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -59391,7 +60995,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: Identifiers{ "str", }, @@ -59412,7 +61016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: Identifiers{ "j", }, @@ -59431,7 +61035,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: Identifiers{ "j", }, @@ -59453,7 +61057,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: nil, }, Value: float64(1), @@ -59474,7 +61078,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: Identifiers{ "v", }, @@ -59493,7 +61097,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: Identifiers{ "v", }, @@ -59512,7 +61116,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: Identifiers{ "v", }, @@ -59534,7 +61138,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: nil, }, Value: float64(10), @@ -59556,7 +61160,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3790, + context: p5394, freeVariables: nil, }, Value: float64(1), @@ -59583,7 +61187,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -59692,7 +61296,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -59713,7 +61317,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "2", @@ -59740,7 +61344,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -59762,7 +61366,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -59785,7 +61389,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: Identifiers{ "str", }, @@ -59806,7 +61410,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: Identifiers{ "j", }, @@ -59825,7 +61429,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: Identifiers{ "j", }, @@ -59847,7 +61451,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: nil, }, Value: float64(1), @@ -59868,7 +61472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: Identifiers{ "v", }, @@ -59887,7 +61491,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: Identifiers{ "v", }, @@ -59906,7 +61510,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: Identifiers{ "v", }, @@ -59928,7 +61532,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: nil, }, Value: float64(10), @@ -59950,7 +61554,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3824, + context: p5428, freeVariables: nil, }, Value: float64(2), @@ -59977,7 +61581,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -60086,7 +61690,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -60107,7 +61711,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "3", @@ -60134,7 +61738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -60156,7 +61760,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -60179,7 +61783,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: Identifiers{ "str", }, @@ -60200,7 +61804,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: Identifiers{ "j", }, @@ -60219,7 +61823,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: Identifiers{ "j", }, @@ -60241,7 +61845,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: nil, }, Value: float64(1), @@ -60262,7 +61866,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: Identifiers{ "v", }, @@ -60281,7 +61885,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: Identifiers{ "v", }, @@ -60300,7 +61904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: Identifiers{ "v", }, @@ -60322,7 +61926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: nil, }, Value: float64(10), @@ -60344,7 +61948,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3858, + context: p5462, freeVariables: nil, }, Value: float64(3), @@ -60371,7 +61975,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -60480,7 +62084,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -60501,7 +62105,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "4", @@ -60528,7 +62132,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + 
context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -60550,7 +62154,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -60573,7 +62177,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: Identifiers{ "str", }, @@ -60594,7 +62198,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: Identifiers{ "j", }, @@ -60613,7 +62217,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: Identifiers{ "j", }, @@ -60635,7 +62239,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: nil, }, Value: float64(1), @@ -60656,7 +62260,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: Identifiers{ "v", }, @@ -60675,7 +62279,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: Identifiers{ "v", }, @@ -60694,7 +62298,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: Identifiers{ "v", }, @@ -60716,7 +62320,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: nil, }, Value: float64(10), @@ -60738,7 +62342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3892, + context: p5496, freeVariables: nil, }, Value: float64(4), @@ -60765,7 +62369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -60874,7 +62478,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -60895,7 +62499,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "5", @@ -60922,7 +62526,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -60944,7 +62548,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -60967,7 +62571,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: Identifiers{ "str", }, @@ -60988,7 +62592,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: Identifiers{ "j", }, @@ -61007,7 +62611,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: Identifiers{ "j", }, @@ -61029,7 +62633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: nil, }, Value: float64(1), @@ -61050,7 +62654,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: Identifiers{ "v", }, @@ -61069,7 +62673,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: Identifiers{ "v", }, @@ -61088,7 +62692,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: Identifiers{ "v", }, @@ -61110,7 +62714,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: nil, }, Value: float64(10), @@ -61132,7 +62736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3926, + context: p5530, freeVariables: nil, }, Value: 
float64(5), @@ -61159,7 +62763,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -61268,7 +62872,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -61289,7 +62893,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "6", @@ -61316,7 +62920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -61338,7 +62942,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -61361,7 +62965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: Identifiers{ "str", }, @@ -61382,7 +62986,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: Identifiers{ "j", }, @@ -61401,7 +63005,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: Identifiers{ "j", }, @@ -61423,7 +63027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: nil, }, Value: float64(1), @@ -61444,7 +63048,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: Identifiers{ "v", }, @@ -61463,7 +63067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: Identifiers{ "v", }, @@ -61482,7 +63086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: Identifiers{ "v", }, @@ -61504,7 +63108,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: nil, }, Value: float64(10), @@ -61526,7 +63130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3960, + context: p5564, freeVariables: nil, }, Value: float64(6), @@ -61553,7 +63157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -61662,7 +63266,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -61683,7 +63287,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "7", @@ -61710,7 +63314,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -61732,7 +63336,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -61755,7 +63359,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: Identifiers{ "str", }, @@ -61776,7 +63380,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: Identifiers{ "j", }, @@ -61795,7 +63399,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: Identifiers{ "j", }, @@ -61817,7 +63421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: nil, }, Value: float64(1), @@ -61838,7 +63442,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: Identifiers{ "v", }, @@ -61857,7 +63461,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: Identifiers{ "v", }, @@ -61876,7 +63480,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: Identifiers{ "v", }, @@ -61898,7 +63502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: nil, }, Value: float64(10), @@ -61920,7 +63524,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3994, + context: p5598, freeVariables: nil, }, Value: float64(7), @@ -61947,7 +63551,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -62056,7 +63660,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -62077,7 +63681,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "8", @@ -62104,7 +63708,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -62126,7 +63730,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -62149,7 +63753,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: Identifiers{ "str", }, @@ -62170,7 +63774,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: Identifiers{ "j", }, @@ -62189,7 +63793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: Identifiers{ "j", }, @@ -62211,7 +63815,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: nil, }, Value: float64(1), @@ -62232,7 +63836,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: Identifiers{ "v", }, @@ -62251,7 +63855,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: Identifiers{ "v", }, @@ -62270,7 +63874,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: Identifiers{ "v", }, @@ -62292,7 +63896,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: nil, }, Value: float64(10), @@ -62314,7 +63918,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4028, + context: p5632, freeVariables: nil, }, Value: float64(8), @@ -62341,7 +63945,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", "consume", @@ -62450,7 +64054,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "c", }, @@ -62471,7 +64075,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: nil, }, Value: "9", @@ -62498,7 +64102,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", "j", @@ -62520,7 +64124,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "consume", }, @@ -62543,7 +64147,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4062, + context: p5666, freeVariables: Identifiers{ "str", }, @@ -62564,7 +64168,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: 
p4062, + context: p5666, freeVariables: Identifiers{ "j", }, @@ -62583,7 +64187,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4062, + context: p5666, freeVariables: Identifiers{ "j", }, @@ -62605,7 +64209,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4062, + context: p5666, freeVariables: nil, }, Value: float64(1), @@ -62626,7 +64230,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4062, + context: p5666, freeVariables: Identifiers{ "v", }, @@ -62645,7 +64249,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4062, + context: p5666, freeVariables: Identifiers{ "v", }, @@ -62664,7 +64268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4062, + context: p5666, freeVariables: Identifiers{ "v", }, @@ -62686,7 +64290,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4062, + context: p5666, freeVariables: nil, }, Value: float64(10), @@ -62708,7 +64312,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4062, + context: p5666, freeVariables: nil, }, Value: float64(9), @@ -62735,7 +64339,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3708, + context: p5312, freeVariables: Identifiers{ "j", "v", @@ -62780,7 +64384,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4082, + context: p5686, freeVariables: Identifiers{ "j", }, @@ -62826,7 +64430,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4082, + context: p5686, freeVariables: Identifiers{ "v", }, @@ -62867,7 +64471,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "consume", "i", @@ -62888,7 +64492,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3654, + context: p5258, freeVariables: Identifiers{ "consume", }, @@ -62911,7 +64515,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4093, + context: p5697, freeVariables: Identifiers{ "str", }, @@ -62932,7 +64536,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4093, + context: p5697, freeVariables: Identifiers{ "i", }, @@ -62953,7 +64557,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4093, + context: p5697, freeVariables: nil, }, Value: float64(0), @@ -62985,7 +64589,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", "str", @@ -63012,7 +64616,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4102, + context: p5706, freeVariables: Identifiers{ "std", "try_parse_field_width", @@ -63040,7 +64644,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "i", "std", @@ -63062,7 +64666,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "i", "std", @@ -63083,7 +64687,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "i", }, @@ -63105,7 +64709,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "std", "str", @@ -63125,7 +64729,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "std", }, @@ -63144,7 +64748,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "std", }, @@ -63190,7 +64794,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4121, + context: p5725, 
freeVariables: Identifiers{ "str", }, @@ -63218,7 +64822,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: nil, }, Expr: &LiteralString{ @@ -63235,7 +64839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: nil, }, Value: "Truncated format code.", @@ -63257,7 +64861,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "i", "std", @@ -63282,7 +64886,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4129, + context: p5733, freeVariables: Identifiers{ "i", "str", @@ -63302,7 +64906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4129, + context: p5733, freeVariables: Identifiers{ "str", }, @@ -63323,7 +64927,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4129, + context: p5733, freeVariables: Identifiers{ "i", }, @@ -63349,7 +64953,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "c", "i", @@ -63457,7 +65061,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "c", }, @@ -63478,7 +65082,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: nil, }, Value: ".", @@ -63505,7 +65109,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "i", "str", @@ -63526,7 +65130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "try_parse_field_width", }, @@ -63549,7 +65153,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4154, + context: p5758, freeVariables: Identifiers{ "str", }, @@ -63570,7 +65174,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4154, + context: p5758, freeVariables: Identifiers{ "i", }, @@ -63589,7 +65193,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4154, + context: p5758, freeVariables: Identifiers{ "i", }, @@ -63611,7 +65215,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4154, + context: p5758, freeVariables: nil, }, Value: float64(1), @@ -63638,7 +65242,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4106, + context: p5710, freeVariables: Identifiers{ "i", }, @@ -63682,7 +65286,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4166, + context: p5770, freeVariables: Identifiers{ "i", }, @@ -63728,7 +65332,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4166, + context: p5770, freeVariables: nil, }, }, @@ -63757,7 +65361,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", "str", @@ -63785,7 +65389,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4174, + context: p5778, freeVariables: Identifiers{ "std", }, @@ -63812,7 +65416,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "i", "std", @@ -63833,7 +65437,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "i", "std", @@ -63854,7 +65458,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "i", }, @@ -63876,7 +65480,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "std", "str", 
@@ -63896,7 +65500,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "std", }, @@ -63915,7 +65519,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "std", }, @@ -63961,7 +65565,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4193, + context: p5797, freeVariables: Identifiers{ "str", }, @@ -63989,7 +65593,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: nil, }, Expr: &LiteralString{ @@ -64006,7 +65610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: nil, }, Value: "Truncated format code.", @@ -64028,7 +65632,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "i", "std", @@ -64052,7 +65656,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4201, + context: p5805, freeVariables: Identifiers{ "i", "str", @@ -64072,7 +65676,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4201, + context: p5805, freeVariables: Identifiers{ "str", }, @@ -64093,7 +65697,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4201, + context: p5805, freeVariables: Identifiers{ "i", }, @@ -64119,7 +65723,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "c", "i", @@ -64140,7 +65744,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "c", "std", @@ -64160,7 +65764,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "c", "std", @@ -64265,7 +65869,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "c", }, @@ -64286,7 +65890,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: nil, }, Value: "h", @@ -64399,7 +66003,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "c", }, @@ -64420,7 +66024,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: nil, }, Value: "l", @@ -64534,7 +66138,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "c", }, @@ -64555,7 +66159,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: nil, }, Value: "L", @@ -64583,7 +66187,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "i", }, @@ -64602,7 +66206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "i", }, @@ -64624,7 +66228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: nil, }, Value: float64(1), @@ -64645,7 +66249,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4178, + context: p5782, freeVariables: Identifiers{ "i", }, @@ -64673,7 +66277,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", "str", @@ -64702,7 +66306,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4257, + context: p5861, freeVariables: Identifiers{ "std", }, @@ -64729,7 +66333,7 @@ var StdAst = &DesugaredObject{ 
}, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", "std", @@ -64750,7 +66354,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", "std", @@ -64771,7 +66375,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -64793,7 +66397,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "std", "str", @@ -64813,7 +66417,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "std", }, @@ -64832,7 +66436,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "std", }, @@ -64878,7 +66482,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4276, + context: p5880, freeVariables: Identifiers{ "str", }, @@ -64906,7 +66510,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Expr: &LiteralString{ @@ -64923,7 +66527,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "Truncated format code.", @@ -64945,7 +66549,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", "std", @@ -64969,7 +66573,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4284, + context: p5888, freeVariables: Identifiers{ "i", "str", @@ -64989,7 +66593,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4284, + context: p5888, freeVariables: Identifiers{ "str", }, @@ -65010,7 +66614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4284, + context: p5888, freeVariables: Identifiers{ "i", }, @@ -65036,7 +66640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -65057,7 +66661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "std", @@ -65077,7 +66681,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "std", @@ -65182,7 +66786,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -65203,7 +66807,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "d", @@ -65316,7 +66920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -65337,7 +66941,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "i", @@ -65451,7 +67055,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -65472,7 +67076,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "u", @@ -65500,7 +67104,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -65544,7 +67148,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4334, + context: p5938, freeVariables: Identifiers{ "i", }, @@ -65563,7 +67167,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4334, + context: p5938, 
freeVariables: Identifiers{ "i", }, @@ -65585,7 +67189,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4334, + context: p5938, freeVariables: nil, }, Value: float64(1), @@ -65631,7 +67235,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4334, + context: p5938, freeVariables: nil, }, Value: "d", @@ -65677,7 +67281,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4334, + context: p5938, freeVariables: nil, }, Value: false, @@ -65700,7 +67304,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -65806,7 +67410,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -65827,7 +67431,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "o", @@ -65854,7 +67458,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -65898,7 +67502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4361, + context: p5965, freeVariables: Identifiers{ "i", }, @@ -65917,7 +67521,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4361, + context: p5965, freeVariables: Identifiers{ "i", }, @@ -65939,7 +67543,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4361, + context: p5965, freeVariables: nil, }, Value: float64(1), @@ -65985,7 +67589,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4361, + context: p5965, freeVariables: nil, }, Value: "o", @@ -66031,7 +67635,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4361, + context: p5965, freeVariables: nil, }, Value: false, @@ -66054,7 +67658,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -66160,7 +67764,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -66181,7 +67785,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "x", @@ -66208,7 +67812,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -66252,7 +67856,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4388, + context: p5992, freeVariables: Identifiers{ "i", }, @@ -66271,7 +67875,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4388, + context: p5992, freeVariables: Identifiers{ "i", }, @@ -66293,7 +67897,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4388, + context: p5992, freeVariables: nil, }, Value: float64(1), @@ -66339,7 +67943,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4388, + context: p5992, freeVariables: nil, }, Value: "x", @@ -66385,7 +67989,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4388, + context: p5992, freeVariables: nil, }, Value: false, @@ -66408,7 +68012,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -66514,7 +68118,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -66535,7 +68139,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "X", @@ -66562,7 +68166,7 @@ var StdAst = &DesugaredObject{ }, file: 
p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -66606,7 +68210,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4415, + context: p6019, freeVariables: Identifiers{ "i", }, @@ -66625,7 +68229,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4415, + context: p6019, freeVariables: Identifiers{ "i", }, @@ -66647,7 +68251,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4415, + context: p6019, freeVariables: nil, }, Value: float64(1), @@ -66693,7 +68297,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4415, + context: p6019, freeVariables: nil, }, Value: "x", @@ -66739,7 +68343,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4415, + context: p6019, freeVariables: nil, }, Value: true, @@ -66762,7 +68366,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -66868,7 +68472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -66889,7 +68493,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "e", @@ -66916,7 +68520,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -66960,7 +68564,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4442, + context: p6046, freeVariables: Identifiers{ "i", }, @@ -66979,7 +68583,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4442, + context: p6046, freeVariables: Identifiers{ "i", }, @@ -67001,7 +68605,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4442, + context: p6046, freeVariables: nil, }, Value: float64(1), @@ -67047,7 +68651,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4442, + context: p6046, freeVariables: nil, }, Value: "e", @@ -67093,7 +68697,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4442, + context: p6046, freeVariables: nil, }, Value: false, @@ -67116,7 +68720,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -67222,7 +68826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -67243,7 +68847,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "E", @@ -67270,7 +68874,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -67314,7 +68918,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4469, + context: p6073, freeVariables: Identifiers{ "i", }, @@ -67333,7 +68937,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4469, + context: p6073, freeVariables: Identifiers{ "i", }, @@ -67355,7 +68959,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4469, + context: p6073, freeVariables: nil, }, Value: float64(1), @@ -67401,7 +69005,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4469, + context: p6073, freeVariables: nil, }, Value: "e", @@ -67447,7 +69051,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4469, + context: p6073, freeVariables: nil, }, Value: true, @@ -67470,7 +69074,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -67576,7 +69180,7 @@ 
var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -67597,7 +69201,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "f", @@ -67624,7 +69228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -67668,7 +69272,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4496, + context: p6100, freeVariables: Identifiers{ "i", }, @@ -67687,7 +69291,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4496, + context: p6100, freeVariables: Identifiers{ "i", }, @@ -67709,7 +69313,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4496, + context: p6100, freeVariables: nil, }, Value: float64(1), @@ -67755,7 +69359,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4496, + context: p6100, freeVariables: nil, }, Value: "f", @@ -67801,7 +69405,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4496, + context: p6100, freeVariables: nil, }, Value: false, @@ -67824,7 +69428,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -67930,7 +69534,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -67951,7 +69555,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "F", @@ -67978,7 +69582,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -68022,7 +69626,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4523, + context: p6127, freeVariables: Identifiers{ "i", }, @@ -68041,7 +69645,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4523, + context: p6127, freeVariables: Identifiers{ "i", }, @@ -68063,7 +69667,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4523, + context: p6127, freeVariables: nil, }, Value: float64(1), @@ -68109,7 +69713,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4523, + context: p6127, freeVariables: nil, }, Value: "f", @@ -68155,7 +69759,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4523, + context: p6127, freeVariables: nil, }, Value: true, @@ -68178,7 +69782,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -68284,7 +69888,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -68305,7 +69909,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "g", @@ -68332,7 +69936,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -68376,7 +69980,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4550, + context: p6154, freeVariables: Identifiers{ "i", }, @@ -68395,7 +69999,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4550, + context: p6154, freeVariables: Identifiers{ "i", }, @@ -68417,7 +70021,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4550, + context: p6154, freeVariables: nil, }, Value: float64(1), @@ -68463,7 +70067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4550, + context: p6154, freeVariables: nil, 
}, Value: "g", @@ -68509,7 +70113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4550, + context: p6154, freeVariables: nil, }, Value: false, @@ -68532,7 +70136,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -68638,7 +70242,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -68659,7 +70263,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "G", @@ -68686,7 +70290,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -68730,7 +70334,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4577, + context: p6181, freeVariables: Identifiers{ "i", }, @@ -68749,7 +70353,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4577, + context: p6181, freeVariables: Identifiers{ "i", }, @@ -68771,7 +70375,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4577, + context: p6181, freeVariables: nil, }, Value: float64(1), @@ -68817,7 +70421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4577, + context: p6181, freeVariables: nil, }, Value: "g", @@ -68863,7 +70467,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4577, + context: p6181, freeVariables: nil, }, Value: true, @@ -68886,7 +70490,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -68992,7 +70596,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -69013,7 +70617,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "c", @@ -69040,7 +70644,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -69084,7 +70688,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4604, + context: p6208, freeVariables: Identifiers{ "i", }, @@ -69103,7 +70707,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4604, + context: p6208, freeVariables: Identifiers{ "i", }, @@ -69125,7 +70729,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4604, + context: p6208, freeVariables: nil, }, Value: float64(1), @@ -69171,7 +70775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4604, + context: p6208, freeVariables: nil, }, Value: "c", @@ -69217,7 +70821,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4604, + context: p6208, freeVariables: nil, }, Value: false, @@ -69240,7 +70844,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -69346,7 +70950,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -69367,7 +70971,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "s", @@ -69394,7 +70998,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -69438,7 +71042,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4631, + context: p6235, freeVariables: Identifiers{ "i", }, @@ -69457,7 +71061,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4631, + 
context: p6235, freeVariables: Identifiers{ "i", }, @@ -69479,7 +71083,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4631, + context: p6235, freeVariables: nil, }, Value: float64(1), @@ -69525,7 +71129,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4631, + context: p6235, freeVariables: nil, }, Value: "s", @@ -69571,7 +71175,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4631, + context: p6235, freeVariables: nil, }, Value: false, @@ -69594,7 +71198,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", "i", @@ -69700,7 +71304,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -69721,7 +71325,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "%", @@ -69748,7 +71352,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "i", }, @@ -69792,7 +71396,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4658, + context: p6262, freeVariables: Identifiers{ "i", }, @@ -69811,7 +71415,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4658, + context: p6262, freeVariables: Identifiers{ "i", }, @@ -69833,7 +71437,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4658, + context: p6262, freeVariables: nil, }, Value: float64(1), @@ -69879,7 +71483,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4658, + context: p6262, freeVariables: nil, }, Value: "%", @@ -69925,7 +71529,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4658, + context: p6262, freeVariables: nil, }, Value: false, @@ -69948,7 +71552,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -69967,7 +71571,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -69986,7 +71590,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: nil, }, Value: "Unrecognised conversion type: ", @@ -70008,7 +71612,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4261, + context: p5865, freeVariables: Identifiers{ "c", }, @@ -70050,7 +71654,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "parse_conv_type", "std", @@ -70080,7 +71684,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4678, + context: p6282, freeVariables: Identifiers{ "parse_conv_type", "std", @@ -70113,7 +71717,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "i", "parse_conv_type", @@ -70140,7 +71744,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "i", "std", @@ -70161,7 +71765,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "i", }, @@ -70183,7 +71787,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "std", "str", @@ -70203,7 +71807,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "std", }, @@ -70222,7 +71826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, 
freeVariables: Identifiers{ "std", }, @@ -70268,7 +71872,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4697, + context: p6301, freeVariables: Identifiers{ "str", }, @@ -70296,7 +71900,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: nil, }, Expr: &LiteralString{ @@ -70313,7 +71917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: nil, }, Value: "Truncated format code.", @@ -70335,7 +71939,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "i", "parse_conv_type", @@ -70364,7 +71968,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4705, + context: p6309, freeVariables: Identifiers{ "i", "str", @@ -70385,7 +71989,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4705, + context: p6309, freeVariables: Identifiers{ "try_parse_mapping_key", }, @@ -70408,7 +72012,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4711, + context: p6315, freeVariables: Identifiers{ "str", }, @@ -70429,7 +72033,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4711, + context: p6315, freeVariables: Identifiers{ "i", }, @@ -70459,7 +72063,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "mkey", "parse_conv_type", @@ -70487,7 +72091,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4719, + context: p6323, freeVariables: Identifiers{ "mkey", "str", @@ -70508,7 +72112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4719, + context: p6323, freeVariables: Identifiers{ "try_parse_cflags", }, @@ -70531,7 +72135,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4725, + context: p6329, freeVariables: Identifiers{ "str", }, @@ -70552,7 +72156,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4725, + context: p6329, freeVariables: Identifiers{ "mkey", }, @@ -70571,7 +72175,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4725, + context: p6329, freeVariables: Identifiers{ "mkey", }, @@ -70624,7 +72228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "cflags", "mkey", @@ -70652,7 +72256,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4736, + context: p6340, freeVariables: Identifiers{ "cflags", "str", @@ -70673,7 +72277,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4736, + context: p6340, freeVariables: Identifiers{ "try_parse_field_width", }, @@ -70696,7 +72300,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4742, + context: p6346, freeVariables: Identifiers{ "str", }, @@ -70717,7 +72321,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4742, + context: p6346, freeVariables: Identifiers{ "cflags", }, @@ -70736,7 +72340,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4742, + context: p6346, freeVariables: Identifiers{ "cflags", }, @@ -70789,7 +72393,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "cflags", "fw", @@ -70817,7 +72421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4753, + context: p6357, freeVariables: Identifiers{ "fw", "str", @@ -70838,7 +72442,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4753, + context: p6357, freeVariables: Identifiers{ "try_parse_precision", }, @@ -70861,7 +72465,7 @@ 
var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4759, + context: p6363, freeVariables: Identifiers{ "str", }, @@ -70882,7 +72486,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4759, + context: p6363, freeVariables: Identifiers{ "fw", }, @@ -70901,7 +72505,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4759, + context: p6363, freeVariables: Identifiers{ "fw", }, @@ -70954,7 +72558,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "cflags", "fw", @@ -70982,7 +72586,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4770, + context: p6374, freeVariables: Identifiers{ "prec", "str", @@ -71003,7 +72607,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4770, + context: p6374, freeVariables: Identifiers{ "try_parse_length_modifier", }, @@ -71026,7 +72630,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4776, + context: p6380, freeVariables: Identifiers{ "str", }, @@ -71047,7 +72651,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4776, + context: p6380, freeVariables: Identifiers{ "prec", }, @@ -71066,7 +72670,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4776, + context: p6380, freeVariables: Identifiers{ "prec", }, @@ -71119,7 +72723,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "cflags", "fw", @@ -71147,7 +72751,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4787, + context: p6391, freeVariables: Identifiers{ "len_mod", "parse_conv_type", @@ -71168,7 +72772,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4787, + context: p6391, freeVariables: Identifiers{ "parse_conv_type", }, @@ -71191,7 +72795,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4793, + context: p6397, freeVariables: Identifiers{ "str", }, @@ -71212,7 +72816,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4793, + context: p6397, freeVariables: Identifiers{ "len_mod", }, @@ -71242,7 +72846,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4682, + context: p6286, freeVariables: Identifiers{ "cflags", "ctype", @@ -71290,7 +72894,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4802, + context: p6406, freeVariables: Identifiers{ "ctype", }, @@ -71309,7 +72913,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4802, + context: p6406, freeVariables: Identifiers{ "ctype", }, @@ -71378,7 +72982,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4802, + context: p6406, freeVariables: Identifiers{ "cflags", "ctype", @@ -71426,7 +73030,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "mkey", }, @@ -71445,7 +73049,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "mkey", }, @@ -71514,7 +73118,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "cflags", }, @@ -71533,7 +73137,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "cflags", }, @@ -71602,7 +73206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "fw", }, @@ -71621,7 +73225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ 
"fw", }, @@ -71690,7 +73294,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "prec", }, @@ -71709,7 +73313,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "prec", }, @@ -71778,7 +73382,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "ctype", }, @@ -71797,7 +73401,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "ctype", }, @@ -71866,7 +73470,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "ctype", }, @@ -71885,7 +73489,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4813, + context: p6417, freeVariables: Identifiers{ "ctype", }, @@ -71948,7 +73552,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "parse_code", "std", @@ -71973,7 +73577,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4852, + context: p6456, freeVariables: Identifiers{ "parse_code", "parse_codes", @@ -72004,7 +73608,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "cur", "i", @@ -72029,7 +73633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "i", "std", @@ -72050,7 +73654,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "i", }, @@ -72072,7 +73676,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "std", "str", @@ -72092,7 +73696,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "std", }, @@ -72111,7 +73715,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "std", }, @@ -72157,7 +73761,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4871, + context: p6475, freeVariables: Identifiers{ "str", }, @@ -72185,7 +73789,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "cur", "out", @@ -72205,7 +73809,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "out", }, @@ -72227,7 +73831,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "cur", }, @@ -72247,7 +73851,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4881, + context: p6485, freeVariables: Identifiers{ "cur", }, @@ -72272,7 +73876,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "cur", "i", @@ -72300,7 +73904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4887, + context: p6491, freeVariables: Identifiers{ "i", "str", @@ -72320,7 +73924,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4887, + context: p6491, freeVariables: Identifiers{ "str", }, @@ -72341,7 +73945,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4887, + context: p6491, freeVariables: Identifiers{ "i", }, @@ -72367,7 +73971,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "c", "cur", @@ -72478,7 
+74082,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "c", }, @@ -72499,7 +74103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: nil, }, Value: "%", @@ -72526,7 +74130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "cur", "i", @@ -72553,7 +74157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4910, + context: p6514, freeVariables: Identifiers{ "i", "parse_code", @@ -72574,7 +74178,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4910, + context: p6514, freeVariables: Identifiers{ "parse_code", }, @@ -72597,7 +74201,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4916, + context: p6520, freeVariables: Identifiers{ "str", }, @@ -72618,7 +74222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4916, + context: p6520, freeVariables: Identifiers{ "i", }, @@ -72637,7 +74241,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4916, + context: p6520, freeVariables: Identifiers{ "i", }, @@ -72659,7 +74263,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4916, + context: p6520, freeVariables: nil, }, Value: float64(1), @@ -72689,7 +74293,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "cur", "out", @@ -72712,7 +74316,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "parse_codes", }, @@ -72735,7 +74339,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4929, + context: p6533, freeVariables: Identifiers{ "str", }, @@ -72756,7 +74360,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4929, + context: p6533, freeVariables: Identifiers{ "r", }, @@ -72775,7 +74379,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4929, + context: p6533, freeVariables: Identifiers{ "r", }, @@ -72819,7 +74423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4929, + context: p6533, freeVariables: Identifiers{ "cur", "out", @@ -72840,7 +74444,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4929, + context: p6533, freeVariables: Identifiers{ "out", }, @@ -72862,7 +74466,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4929, + context: p6533, freeVariables: Identifiers{ "cur", "r", @@ -72883,7 +74487,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4944, + context: p6548, freeVariables: Identifiers{ "cur", }, @@ -72904,7 +74508,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4944, + context: p6548, freeVariables: Identifiers{ "r", }, @@ -72923,7 +74527,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4944, + context: p6548, freeVariables: Identifiers{ "r", }, @@ -72971,7 +74575,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4929, + context: p6533, freeVariables: nil, }, Value: "", @@ -72999,7 +74603,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "c", "cur", @@ -73023,7 +74627,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4856, + context: p6460, freeVariables: Identifiers{ "parse_codes", }, @@ -73046,7 +74650,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4958, + context: p6562, freeVariables: Identifiers{ "str", }, @@ -73067,7 +74671,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p4958, + context: p6562, freeVariables: Identifiers{ "i", }, @@ -73086,7 +74690,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4958, + context: p6562, freeVariables: Identifiers{ "i", }, @@ -73108,7 +74712,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4958, + context: p6562, freeVariables: nil, }, Value: float64(1), @@ -73129,7 +74733,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4958, + context: p6562, freeVariables: Identifiers{ "out", }, @@ -73150,7 +74754,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4958, + context: p6562, freeVariables: Identifiers{ "c", "cur", @@ -73170,7 +74774,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4958, + context: p6562, freeVariables: Identifiers{ "cur", }, @@ -73192,7 +74796,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4958, + context: p6562, freeVariables: Identifiers{ "c", }, @@ -73227,7 +74831,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "parse_codes", "std", @@ -73252,7 +74856,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4977, + context: p6581, freeVariables: Identifiers{ "parse_codes", "str", @@ -73272,7 +74876,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4977, + context: p6581, freeVariables: Identifiers{ "parse_codes", }, @@ -73295,7 +74899,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4983, + context: p6587, freeVariables: Identifiers{ "str", }, @@ -73316,7 +74920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4983, + context: p6587, freeVariables: nil, }, Value: float64(0), @@ -73336,7 +74940,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4983, + context: p6587, freeVariables: nil, }, Elements: nil, @@ -73356,7 +74960,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4983, + context: p6587, freeVariables: nil, }, Value: "", @@ -73386,7 +74990,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "std", @@ -73410,7 +75014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4992, + context: p6596, freeVariables: nil, }, Parameters: Parameters{ @@ -73435,7 +75039,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4995, + context: p6599, freeVariables: Identifiers{ "s", "w", @@ -73458,7 +75062,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4999, + context: p6603, freeVariables: Identifiers{ "aux", "s", @@ -73486,7 +75090,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5003, + context: p6607, freeVariables: Identifiers{ "aux", "s", @@ -73508,7 +75112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5003, + context: p6607, freeVariables: Identifiers{ "w", }, @@ -73527,7 +75131,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5003, + context: p6607, freeVariables: Identifiers{ "w", }, @@ -73549,7 +75153,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5003, + context: p6607, freeVariables: nil, }, Value: float64(0), @@ -73570,7 +75174,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5003, + context: p6607, freeVariables: Identifiers{ "v", }, @@ -73591,7 +75195,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5003, + context: p6607, freeVariables: Identifiers{ "aux", "s", @@ -73613,7 +75217,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: 
p5003, + context: p6607, freeVariables: Identifiers{ "aux", }, @@ -73636,7 +75240,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5018, + context: p6622, freeVariables: Identifiers{ "w", }, @@ -73655,7 +75259,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5018, + context: p6622, freeVariables: Identifiers{ "w", }, @@ -73677,7 +75281,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5018, + context: p6622, freeVariables: nil, }, Value: float64(1), @@ -73698,7 +75302,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5018, + context: p6622, freeVariables: Identifiers{ "s", "v", @@ -73718,7 +75322,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5018, + context: p6622, freeVariables: Identifiers{ "v", }, @@ -73740,7 +75344,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5018, + context: p6622, freeVariables: Identifiers{ "s", }, @@ -73773,7 +75377,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4995, + context: p6599, freeVariables: Identifiers{ "aux", "w", @@ -73793,7 +75397,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p4995, + context: p6599, freeVariables: Identifiers{ "aux", }, @@ -73816,7 +75420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5035, + context: p6639, freeVariables: Identifiers{ "w", }, @@ -73837,7 +75441,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5035, + context: p6639, freeVariables: nil, }, Value: "", @@ -73869,7 +75473,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "padding", @@ -73894,7 +75498,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5042, + context: p6646, freeVariables: Identifiers{ "padding", "std", @@ -73923,7 +75527,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5046, + context: p6650, freeVariables: Identifiers{ "padding", "s", @@ -73946,7 +75550,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5046, + context: p6650, freeVariables: Identifiers{ "padding", "s", @@ -73969,7 +75573,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5046, + context: p6650, freeVariables: Identifiers{ "padding", }, @@ -73992,7 +75596,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5054, + context: p6658, freeVariables: Identifiers{ "std", "str", @@ -74013,7 +75617,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5054, + context: p6658, freeVariables: Identifiers{ "w", }, @@ -74035,7 +75639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5054, + context: p6658, freeVariables: Identifiers{ "std", "str", @@ -74055,7 +75659,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5054, + context: p6658, freeVariables: Identifiers{ "std", }, @@ -74074,7 +75678,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5054, + context: p6658, freeVariables: Identifiers{ "std", }, @@ -74120,7 +75724,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5067, + context: p6671, freeVariables: Identifiers{ "str", }, @@ -74148,7 +75752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5054, + context: p6658, freeVariables: Identifiers{ "s", }, @@ -74176,7 +75780,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5046, + context: p6650, freeVariables: Identifiers{ "str", }, @@ -74202,7 +75806,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: 
Identifiers{ "codes", "pad_left", @@ -74228,7 +75832,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5077, + context: p6681, freeVariables: Identifiers{ "padding", "std", @@ -74257,7 +75861,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5081, + context: p6685, freeVariables: Identifiers{ "padding", "s", @@ -74280,7 +75884,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5081, + context: p6685, freeVariables: Identifiers{ "str", }, @@ -74302,7 +75906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5081, + context: p6685, freeVariables: Identifiers{ "padding", "s", @@ -74325,7 +75929,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5081, + context: p6685, freeVariables: Identifiers{ "padding", }, @@ -74348,7 +75952,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5091, + context: p6695, freeVariables: Identifiers{ "std", "str", @@ -74369,7 +75973,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5091, + context: p6695, freeVariables: Identifiers{ "w", }, @@ -74391,7 +75995,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5091, + context: p6695, freeVariables: Identifiers{ "std", "str", @@ -74411,7 +76015,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5091, + context: p6695, freeVariables: Identifiers{ "std", }, @@ -74430,7 +76034,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5091, + context: p6695, freeVariables: Identifiers{ "std", }, @@ -74476,7 +76080,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5104, + context: p6708, freeVariables: Identifiers{ "str", }, @@ -74504,7 +76108,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5091, + context: p6695, freeVariables: Identifiers{ "s", }, @@ -74536,7 +76140,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "pad_left", @@ -74562,7 +76166,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5112, + context: p6716, freeVariables: Identifiers{ "pad_left", "std", @@ -74595,7 +76199,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", "min_chars", @@ -74625,7 +76229,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5120, + context: p6724, freeVariables: Identifiers{ "n__", "std", @@ -74645,7 +76249,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5120, + context: p6724, freeVariables: Identifiers{ "std", }, @@ -74664,7 +76268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5120, + context: p6724, freeVariables: Identifiers{ "std", }, @@ -74710,7 +76314,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5129, + context: p6733, freeVariables: Identifiers{ "n__", }, @@ -74740,7 +76344,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", "min_chars", @@ -74771,7 +76375,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5135, + context: p6739, freeVariables: Identifiers{ "aux", "radix", @@ -74800,7 +76404,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: p6743, freeVariables: Identifiers{ "aux", "n", @@ -74908,7 +76512,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: p6743, freeVariables: Identifiers{ "n", }, @@ -74929,7 +76533,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: 
p6743, freeVariables: nil, }, Value: float64(0), @@ -74955,7 +76559,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: p6743, freeVariables: Identifiers{ "zero_prefix", }, @@ -74976,7 +76580,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: p6743, freeVariables: Identifiers{ "aux", "n", @@ -74998,7 +76602,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: p6743, freeVariables: Identifiers{ "aux", "n", @@ -75020,7 +76624,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: p6743, freeVariables: Identifiers{ "aux", }, @@ -75043,7 +76647,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5162, + context: p6766, freeVariables: Identifiers{ "n", "radix", @@ -75064,7 +76668,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5162, + context: p6766, freeVariables: Identifiers{ "std", }, @@ -75083,7 +76687,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5162, + context: p6766, freeVariables: Identifiers{ "std", }, @@ -75129,7 +76733,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5171, + context: p6775, freeVariables: Identifiers{ "n", "radix", @@ -75149,7 +76753,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5171, + context: p6775, freeVariables: Identifiers{ "n", }, @@ -75171,7 +76775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5171, + context: p6775, freeVariables: Identifiers{ "radix", }, @@ -75292,7 +76896,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: p6743, freeVariables: Identifiers{ "n", }, @@ -75313,7 +76917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5139, + context: p6743, freeVariables: Identifiers{ "radix", }, @@ -75346,7 +76950,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "aux", "blank", @@ -75376,7 +76980,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5193, + context: p6797, freeVariables: Identifiers{ "aux", "n_", @@ -75482,7 +77086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5193, + context: p6797, freeVariables: Identifiers{ "n_", "std", @@ -75502,7 +77106,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5193, + context: p6797, freeVariables: Identifiers{ "std", }, @@ -75521,7 +77125,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5193, + context: p6797, freeVariables: Identifiers{ "std", }, @@ -75567,7 +77171,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5212, + context: p6816, freeVariables: Identifiers{ "n_", }, @@ -75594,7 +77198,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5193, + context: p6797, freeVariables: nil, }, Value: float64(0), @@ -75620,7 +77224,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5193, + context: p6797, freeVariables: nil, }, Value: "0", @@ -75641,7 +77245,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5193, + context: p6797, freeVariables: Identifiers{ "aux", "n_", @@ -75662,7 +77266,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5193, + context: p6797, freeVariables: Identifiers{ "aux", }, @@ -75685,7 +77289,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5222, + context: p6826, freeVariables: Identifiers{ "n_", "std", @@ -75705,7 +77309,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5222, + context: p6826, freeVariables: 
Identifiers{ "std", }, @@ -75724,7 +77328,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5222, + context: p6826, freeVariables: Identifiers{ "std", }, @@ -75770,7 +77374,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5231, + context: p6835, freeVariables: Identifiers{ "n_", }, @@ -75807,7 +77411,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", "dec", @@ -75836,7 +77440,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5237, + context: p6841, freeVariables: Identifiers{ "n__", }, @@ -75855,7 +77459,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5237, + context: p6841, freeVariables: Identifiers{ "n__", }, @@ -75877,7 +77481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5237, + context: p6841, freeVariables: nil, }, Value: float64(0), @@ -75901,7 +77505,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", "dec", @@ -75930,7 +77534,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: Identifiers{ "blank", "min_chars", @@ -75952,7 +77556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: Identifiers{ "min_chars", }, @@ -75974,7 +77578,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: Identifiers{ "blank", "neg", @@ -75995,7 +77599,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: Identifiers{ "blank", "neg", @@ -76016,7 +77620,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: Identifiers{ "blank", "neg", @@ -76036,7 +77640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: Identifiers{ "neg", }, @@ -76058,7 +77662,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: Identifiers{ "blank", }, @@ -76081,7 +77685,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: Identifiers{ "sign", }, @@ -76103,7 +77707,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: nil, }, Value: float64(1), @@ -76123,7 +77727,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5246, + context: p6850, freeVariables: nil, }, Value: float64(0), @@ -76148,7 +77752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", "dec", @@ -76177,7 +77781,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5268, + context: p6872, freeVariables: Identifiers{ "min_digits", "std", @@ -76198,7 +77802,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5268, + context: p6872, freeVariables: Identifiers{ "std", }, @@ -76217,7 +77821,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5268, + context: p6872, freeVariables: Identifiers{ "std", }, @@ -76263,7 +77867,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5277, + context: p6881, freeVariables: Identifiers{ "zp", }, @@ -76284,7 +77888,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5277, + context: p6881, freeVariables: Identifiers{ "min_digits", }, @@ -76314,7 +77918,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, 
freeVariables: Identifiers{ "blank", "dec", @@ -76341,7 +77945,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5285, + context: p6889, freeVariables: Identifiers{ "dec", "pad_left", @@ -76362,7 +77966,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5285, + context: p6889, freeVariables: Identifiers{ "pad_left", }, @@ -76385,7 +77989,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5291, + context: p6895, freeVariables: Identifiers{ "dec", }, @@ -76406,7 +78010,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5291, + context: p6895, freeVariables: Identifiers{ "zp2", }, @@ -76427,7 +78031,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5291, + context: p6895, freeVariables: nil, }, Value: "0", @@ -76457,7 +78061,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", "dec2", @@ -76479,7 +78083,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", "neg", @@ -76500,7 +78104,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "neg", }, @@ -76521,7 +78125,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: nil, }, Value: "-", @@ -76542,7 +78146,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", "sign", @@ -76562,7 +78166,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "sign", }, @@ -76583,7 +78187,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: nil, }, Value: "+", @@ -76604,7 +78208,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", }, @@ -76623,7 +78227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "blank", }, @@ -76644,7 +78248,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: nil, }, Value: " ", @@ -76665,7 +78269,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: nil, }, Value: "", @@ -76690,7 +78294,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5116, + context: p6720, freeVariables: Identifiers{ "dec2", }, @@ -76723,7 +78327,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "pad_left", @@ -76750,7 +78354,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5320, + context: p6924, freeVariables: Identifiers{ "pad_left", "std", @@ -76783,7 +78387,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "add_zerox", "blank", @@ -76813,7 +78417,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5328, + context: p6932, freeVariables: Identifiers{ "capitals", }, @@ -76832,7 +78436,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5328, + context: p6932, freeVariables: nil, }, Elements: Nodes{ @@ -76850,7 +78454,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(0), @@ -76870,7 +78474,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, 
freeVariables: nil, }, Value: float64(1), @@ -76890,7 +78494,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(2), @@ -76910,7 +78514,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(3), @@ -76930,7 +78534,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(4), @@ -76950,7 +78554,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(5), @@ -76970,7 +78574,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(6), @@ -76990,7 +78594,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(7), @@ -77010,7 +78614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(8), @@ -77030,7 +78634,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5333, + context: p6937, freeVariables: nil, }, Value: float64(9), @@ -77054,7 +78658,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5328, + context: p6932, freeVariables: Identifiers{ "capitals", }, @@ -77073,7 +78677,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5328, + context: p6932, freeVariables: Identifiers{ "capitals", }, @@ -77094,7 +78698,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5328, + context: p6932, freeVariables: nil, }, Elements: Nodes{ @@ -77112,7 +78716,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5350, + context: p6954, freeVariables: nil, }, Value: "A", @@ -77133,7 +78737,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5350, + context: p6954, freeVariables: nil, }, Value: "B", @@ -77154,7 +78758,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5350, + context: p6954, freeVariables: nil, }, Value: "C", @@ -77175,7 +78779,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5350, + context: p6954, freeVariables: nil, }, Value: "D", @@ -77196,7 +78800,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5350, + context: p6954, freeVariables: nil, }, Value: "E", @@ -77217,7 +78821,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5350, + context: p6954, freeVariables: nil, }, Value: "F", @@ -77241,7 +78845,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5328, + context: p6932, freeVariables: nil, }, Elements: Nodes{ @@ -77259,7 +78863,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5359, + context: p6963, freeVariables: nil, }, Value: "a", @@ -77280,7 +78884,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5359, + context: p6963, freeVariables: nil, }, Value: "b", @@ -77301,7 +78905,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5359, + context: p6963, freeVariables: nil, }, Value: "c", @@ -77322,7 +78926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5359, + context: p6963, freeVariables: nil, }, Value: "d", @@ -77343,7 +78947,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5359, + context: p6963, freeVariables: nil, }, Value: "e", @@ -77364,7 +78968,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5359, + context: p6963, freeVariables: nil, }, Value: "f", @@ -77393,7 +78997,7 
@@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "add_zerox", "blank", @@ -77424,7 +79028,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5369, + context: p6973, freeVariables: Identifiers{ "n__", "std", @@ -77444,7 +79048,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5369, + context: p6973, freeVariables: Identifiers{ "std", }, @@ -77463,7 +79067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5369, + context: p6973, freeVariables: Identifiers{ "std", }, @@ -77509,7 +79113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5378, + context: p6982, freeVariables: Identifiers{ "n__", }, @@ -77539,7 +79143,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "add_zerox", "blank", @@ -77571,7 +79175,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5384, + context: p6988, freeVariables: Identifiers{ "aux", "numerals", @@ -77599,7 +79203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: Identifiers{ "aux", "n", @@ -77706,7 +79310,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: Identifiers{ "n", }, @@ -77727,7 +79331,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: nil, }, Value: float64(0), @@ -77753,7 +79357,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: nil, }, Value: "", @@ -77774,7 +79378,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: Identifiers{ "aux", "n", @@ -77796,7 +79400,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: Identifiers{ "aux", "n", @@ -77817,7 +79421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: Identifiers{ "aux", }, @@ -77840,7 +79444,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5410, + context: p7014, freeVariables: Identifiers{ "n", "std", @@ -77860,7 +79464,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5410, + context: p7014, freeVariables: Identifiers{ "std", }, @@ -77879,7 +79483,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5410, + context: p7014, freeVariables: Identifiers{ "std", }, @@ -77925,7 +79529,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5419, + context: p7023, freeVariables: Identifiers{ "n", }, @@ -77944,7 +79548,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5419, + context: p7023, freeVariables: Identifiers{ "n", }, @@ -77966,7 +79570,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5419, + context: p7023, freeVariables: nil, }, Value: float64(16), @@ -78000,7 +79604,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: Identifiers{ "n", "numerals", @@ -78021,7 +79625,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: Identifiers{ "numerals", }, @@ -78127,7 +79731,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: Identifiers{ "n", }, @@ -78148,7 +79752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5388, + context: p6992, freeVariables: nil, }, Value: float64(16), @@ -78182,7 +79786,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "add_zerox", "aux", @@ -78214,7 +79818,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5443, + context: p7047, freeVariables: Identifiers{ "aux", "n_", @@ -78320,7 +79924,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5443, + context: p7047, freeVariables: Identifiers{ "n_", "std", @@ -78340,7 +79944,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5443, + context: p7047, freeVariables: Identifiers{ "std", }, @@ -78359,7 +79963,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5443, + context: p7047, freeVariables: Identifiers{ "std", }, @@ -78405,7 +80009,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5462, + context: p7066, freeVariables: Identifiers{ "n_", }, @@ -78432,7 +80036,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5443, + context: p7047, freeVariables: nil, }, Value: float64(0), @@ -78458,7 +80062,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5443, + context: p7047, freeVariables: nil, }, Value: "0", @@ -78479,7 +80083,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5443, + context: p7047, freeVariables: Identifiers{ "aux", "n_", @@ -78500,7 +80104,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5443, + context: p7047, freeVariables: Identifiers{ "aux", }, @@ -78523,7 +80127,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5472, + context: p7076, freeVariables: Identifiers{ "n_", "std", @@ -78543,7 +80147,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5472, + context: p7076, freeVariables: Identifiers{ "std", }, @@ -78562,7 +80166,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5472, + context: p7076, freeVariables: Identifiers{ "std", }, @@ -78608,7 +80212,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5481, + context: p7085, freeVariables: Identifiers{ "n_", }, @@ -78645,7 +80249,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "add_zerox", "blank", @@ -78676,7 +80280,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5487, + context: p7091, freeVariables: Identifiers{ "n__", }, @@ -78695,7 +80299,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5487, + context: p7091, freeVariables: Identifiers{ "n__", }, @@ -78717,7 +80321,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5487, + context: p7091, freeVariables: nil, }, Value: float64(0), @@ -78741,7 +80345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "add_zerox", "blank", @@ -78772,7 +80376,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "add_zerox", "blank", @@ -78795,7 +80399,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "blank", "min_chars", @@ -78817,7 +80421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "min_chars", }, @@ -78839,7 +80443,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "blank", "neg", @@ -78860,7 +80464,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "blank", "neg", @@ -78881,7 +80485,7 
@@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "blank", "neg", @@ -78901,7 +80505,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "neg", }, @@ -78923,7 +80527,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "blank", }, @@ -78946,7 +80550,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "sign", }, @@ -78968,7 +80572,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: nil, }, Value: float64(1), @@ -78988,7 +80592,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: nil, }, Value: float64(0), @@ -79011,7 +80615,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "add_zerox", }, @@ -79030,7 +80634,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: Identifiers{ "add_zerox", }, @@ -79051,7 +80655,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: nil, }, Value: float64(2), @@ -79071,7 +80675,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5496, + context: p7100, freeVariables: nil, }, Value: float64(0), @@ -79096,7 +80700,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "add_zerox", "blank", @@ -79127,7 +80731,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5526, + context: p7130, freeVariables: Identifiers{ "min_digits", "std", @@ -79148,7 +80752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5526, + context: p7130, freeVariables: Identifiers{ "std", }, @@ -79167,7 +80771,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5526, + context: p7130, freeVariables: Identifiers{ "std", }, @@ -79213,7 +80817,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5535, + context: p7139, freeVariables: Identifiers{ "zp", }, @@ -79234,7 +80838,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5535, + context: p7139, freeVariables: Identifiers{ "min_digits", }, @@ -79264,7 +80868,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "add_zerox", "blank", @@ -79293,7 +80897,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: Identifiers{ "add_zerox", "capitals", @@ -79316,7 +80920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: Identifiers{ "add_zerox", "capitals", @@ -79336,7 +80940,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: Identifiers{ "add_zerox", }, @@ -79357,7 +80961,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: Identifiers{ "capitals", }, @@ -79376,7 +80980,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: Identifiers{ "capitals", }, @@ -79397,7 +81001,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: nil, }, Value: "0X", @@ -79418,7 +81022,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, 
freeVariables: nil, }, Value: "0x", @@ -79440,7 +81044,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: nil, }, Value: "", @@ -79463,7 +81067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: Identifiers{ "hex", "pad_left", @@ -79484,7 +81088,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5543, + context: p7147, freeVariables: Identifiers{ "pad_left", }, @@ -79507,7 +81111,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5562, + context: p7166, freeVariables: Identifiers{ "hex", }, @@ -79528,7 +81132,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5562, + context: p7166, freeVariables: Identifiers{ "zp2", }, @@ -79549,7 +81153,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5562, + context: p7166, freeVariables: nil, }, Value: "0", @@ -79580,7 +81184,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "blank", "hex2", @@ -79602,7 +81206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "blank", "neg", @@ -79623,7 +81227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "neg", }, @@ -79644,7 +81248,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: nil, }, Value: "-", @@ -79665,7 +81269,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "blank", "sign", @@ -79685,7 +81289,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "sign", }, @@ -79706,7 +81310,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: nil, }, Value: "+", @@ -79727,7 +81331,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "blank", }, @@ -79746,7 +81350,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "blank", }, @@ -79767,7 +81371,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: nil, }, Value: " ", @@ -79788,7 +81392,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: nil, }, Value: "", @@ -79813,7 +81417,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5324, + context: p6928, freeVariables: Identifiers{ "hex2", }, @@ -79847,7 +81451,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "pad_left", @@ -79875,7 +81479,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5591, + context: p7195, freeVariables: Identifiers{ "std", }, @@ -79901,7 +81505,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5595, + context: p7199, freeVariables: Identifiers{ "std", "str", @@ -79924,7 +81528,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5599, + context: p7203, freeVariables: Identifiers{ "aux", "std", @@ -79952,7 +81556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "aux", "i", @@ -79974,7 +81578,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "i", }, 
@@ -79993,7 +81597,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "i", }, @@ -80015,7 +81619,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: nil, }, Value: float64(0), @@ -80036,7 +81640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: nil, }, Value: "", @@ -80057,7 +81661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "aux", "i", @@ -80165,7 +81769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "i", "str", @@ -80185,7 +81789,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "str", }, @@ -80206,7 +81810,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "i", }, @@ -80229,7 +81833,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: nil, }, Value: "0", @@ -80256,7 +81860,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "aux", "i", @@ -80277,7 +81881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "aux", }, @@ -80300,7 +81904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5634, + context: p7238, freeVariables: Identifiers{ "str", }, @@ -80321,7 +81925,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5634, + context: p7238, freeVariables: Identifiers{ "i", }, @@ -80340,7 +81944,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5634, + context: p7238, freeVariables: Identifiers{ "i", }, @@ -80362,7 +81966,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5634, + context: p7238, freeVariables: nil, }, Value: float64(1), @@ -80389,7 +81993,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "i", "std", @@ -80410,7 +82014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "std", }, @@ -80429,7 +82033,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5603, + context: p7207, freeVariables: Identifiers{ "std", }, @@ -80475,7 +82079,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5650, + context: p7254, freeVariables: Identifiers{ "str", }, @@ -80496,7 +82100,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5650, + context: p7254, freeVariables: nil, }, Value: float64(0), @@ -80516,7 +82120,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5650, + context: p7254, freeVariables: Identifiers{ "i", }, @@ -80535,7 +82139,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5650, + context: p7254, freeVariables: Identifiers{ "i", }, @@ -80557,7 +82161,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5650, + context: p7254, freeVariables: nil, }, Value: float64(1), @@ -80590,7 +82194,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5595, + context: p7199, freeVariables: Identifiers{ "aux", "std", @@ -80611,7 +82215,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5595, + context: p7199, freeVariables: Identifiers{ "aux", }, @@ -80634,7 +82238,7 @@ var StdAst = &DesugaredObject{ }, file: 
p1, }, - context: p5664, + context: p7268, freeVariables: Identifiers{ "str", }, @@ -80655,7 +82259,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5664, + context: p7268, freeVariables: Identifiers{ "std", "str", @@ -80675,7 +82279,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5664, + context: p7268, freeVariables: Identifiers{ "std", "str", @@ -80695,7 +82299,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5664, + context: p7268, freeVariables: Identifiers{ "std", }, @@ -80714,7 +82318,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5664, + context: p7268, freeVariables: Identifiers{ "std", }, @@ -80760,7 +82364,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5677, + context: p7281, freeVariables: Identifiers{ "str", }, @@ -80788,7 +82392,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5664, + context: p7268, freeVariables: nil, }, Value: float64(1), @@ -80820,7 +82424,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "pad_left", @@ -80849,7 +82453,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5684, + context: p7288, freeVariables: Identifiers{ "render_int", "std", @@ -80883,7 +82487,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "blank", "ensure_pt", @@ -80914,7 +82518,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5692, + context: p7296, freeVariables: Identifiers{ "n__", "std", @@ -80934,7 +82538,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5692, + context: p7296, freeVariables: Identifiers{ "std", }, @@ -80953,7 +82557,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5692, + context: p7296, freeVariables: Identifiers{ "std", }, @@ -80999,7 +82603,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5701, + context: p7305, freeVariables: Identifiers{ "n__", }, @@ -81029,7 +82633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "blank", "ensure_pt", @@ -81061,7 +82665,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5707, + context: p7311, freeVariables: Identifiers{ "n_", "std", @@ -81081,7 +82685,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5707, + context: p7311, freeVariables: Identifiers{ "std", }, @@ -81100,7 +82704,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5707, + context: p7311, freeVariables: Identifiers{ "std", }, @@ -81146,7 +82750,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5716, + context: p7320, freeVariables: Identifiers{ "n_", }, @@ -81176,7 +82780,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "blank", "ensure_pt", @@ -81209,7 +82813,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5722, + context: p7326, freeVariables: Identifiers{ "ensure_pt", "prec", @@ -81230,7 +82834,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5722, + context: p7326, freeVariables: Identifiers{ "ensure_pt", "prec", @@ -81336,7 +82940,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5722, + context: p7326, freeVariables: Identifiers{ "prec", }, @@ -81357,7 +82961,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5722, + context: p7326, freeVariables: nil, }, Value: float64(0), @@ -81384,7 +82988,7 @@ var StdAst 
= &DesugaredObject{ }, file: p1, }, - context: p5722, + context: p7326, freeVariables: Identifiers{ "ensure_pt", }, @@ -81404,7 +83008,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5722, + context: p7326, freeVariables: Identifiers{ "ensure_pt", }, @@ -81427,7 +83031,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5722, + context: p7326, freeVariables: nil, }, Value: float64(0), @@ -81447,7 +83051,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5722, + context: p7326, freeVariables: nil, }, Value: float64(1), @@ -81471,7 +83075,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "blank", "dot_size", @@ -81505,7 +83109,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5747, + context: p7351, freeVariables: Identifiers{ "dot_size", "prec", @@ -81526,7 +83130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5747, + context: p7351, freeVariables: Identifiers{ "prec", "zero_pad", @@ -81546,7 +83150,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5747, + context: p7351, freeVariables: Identifiers{ "zero_pad", }, @@ -81568,7 +83172,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5747, + context: p7351, freeVariables: Identifiers{ "prec", }, @@ -81591,7 +83195,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5747, + context: p7351, freeVariables: Identifiers{ "dot_size", }, @@ -81616,7 +83220,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "blank", "ensure_pt", @@ -81649,7 +83253,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5761, + context: p7365, freeVariables: Identifiers{ "blank", "n__", @@ -81674,7 +83278,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5761, + context: p7365, freeVariables: Identifiers{ "render_int", }, @@ -81697,7 +83301,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: Identifiers{ "n__", "std", @@ -81718,7 +83322,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: Identifiers{ "n__", "std", @@ -81738,7 +83342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: Identifiers{ "std", }, @@ -81757,7 +83361,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: Identifiers{ "std", }, @@ -81803,7 +83407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5778, + context: p7382, freeVariables: Identifiers{ "n__", }, @@ -81831,7 +83435,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: Identifiers{ "whole", }, @@ -81853,7 +83457,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: Identifiers{ "zp", }, @@ -81874,7 +83478,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: nil, }, Value: float64(0), @@ -81894,7 +83498,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: Identifiers{ "blank", }, @@ -81915,7 +83519,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: Identifiers{ "sign", }, @@ -81936,7 +83540,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: nil, }, Value: float64(10), 
@@ -81956,7 +83560,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5767, + context: p7371, freeVariables: nil, }, Value: "", @@ -81986,7 +83590,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "ensure_pt", "n_", @@ -82098,7 +83702,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "prec", }, @@ -82119,7 +83723,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: nil, }, Value: float64(0), @@ -82145,7 +83749,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "ensure_pt", "str", @@ -82165,7 +83769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "str", }, @@ -82187,7 +83791,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "ensure_pt", }, @@ -82206,7 +83810,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "ensure_pt", }, @@ -82227,7 +83831,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: nil, }, Value: ".", @@ -82248,7 +83852,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: nil, }, Value: "", @@ -82271,7 +83875,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "n_", "prec", @@ -82300,7 +83904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5818, + context: p7422, freeVariables: Identifiers{ "n_", "prec", @@ -82322,7 +83926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5818, + context: p7422, freeVariables: Identifiers{ "std", }, @@ -82341,7 +83945,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5818, + context: p7422, freeVariables: Identifiers{ "std", }, @@ -82387,7 +83991,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: Identifiers{ "n_", "prec", @@ -82409,7 +84013,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: Identifiers{ "n_", "prec", @@ -82431,7 +84035,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: Identifiers{ "n_", "whole", @@ -82451,7 +84055,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: Identifiers{ "n_", }, @@ -82473,7 +84077,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: Identifiers{ "whole", }, @@ -82496,7 +84100,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: Identifiers{ "prec", "std", @@ -82516,7 +84120,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: Identifiers{ "std", }, @@ -82535,7 +84139,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: Identifiers{ "std", }, @@ -82581,7 +84185,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5846, + context: p7450, freeVariables: nil, }, Value: float64(10), @@ -82601,7 +84205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5846, + context: p7450, freeVariables: Identifiers{ "prec", }, @@ -82630,7 
+84234,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5827, + context: p7431, freeVariables: nil, }, Value: float64(0.5), @@ -82660,7 +84264,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac", "prec", @@ -82684,7 +84288,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac", "trailing", @@ -82704,7 +84308,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "trailing", }, @@ -82726,7 +84330,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac", }, @@ -82745,7 +84349,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac", }, @@ -82767,7 +84371,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: nil, }, Value: float64(0), @@ -82789,7 +84393,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac", "prec", @@ -82816,7 +84420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5865, + context: p7469, freeVariables: Identifiers{ "frac", "prec", @@ -82837,7 +84441,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5865, + context: p7469, freeVariables: Identifiers{ "render_int", }, @@ -82860,7 +84464,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5871, + context: p7475, freeVariables: Identifiers{ "frac", }, @@ -82881,7 +84485,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5871, + context: p7475, freeVariables: Identifiers{ "prec", }, @@ -82902,7 +84506,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5871, + context: p7475, freeVariables: nil, }, Value: float64(0), @@ -82922,7 +84526,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5871, + context: p7475, freeVariables: nil, }, Value: false, @@ -82941,7 +84545,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5871, + context: p7475, freeVariables: nil, }, Value: false, @@ -82960,7 +84564,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5871, + context: p7475, freeVariables: nil, }, Value: float64(10), @@ -82980,7 +84584,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5871, + context: p7475, freeVariables: nil, }, Value: "", @@ -83010,7 +84614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac_str", "str", @@ -83032,7 +84636,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "str", }, @@ -83051,7 +84655,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "str", }, @@ -83073,7 +84677,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: nil, }, Value: ".", @@ -83096,7 +84700,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac_str", "strip_trailing_zero", @@ -83117,7 +84721,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "trailing", }, @@ -83137,7 +84741,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ 
"trailing", }, @@ -83159,7 +84763,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac_str", "strip_trailing_zero", @@ -83179,7 +84783,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "strip_trailing_zero", }, @@ -83202,7 +84806,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5899, + context: p7503, freeVariables: Identifiers{ "frac_str", }, @@ -83229,7 +84833,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "frac_str", }, @@ -83253,7 +84857,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5688, + context: p7292, freeVariables: Identifiers{ "str", }, @@ -83286,7 +84890,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "pad_left", @@ -83315,7 +84919,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5909, + context: p7513, freeVariables: Identifiers{ "render_float_dec", "render_int", @@ -83350,7 +84954,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5913, + context: p7517, freeVariables: Identifiers{ "blank", "caps", @@ -83382,7 +84986,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5917, + context: p7521, freeVariables: Identifiers{ "n__", "std", @@ -83402,7 +85006,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5917, + context: p7521, freeVariables: Identifiers{ "std", }, @@ -83421,7 +85025,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5917, + context: p7521, freeVariables: Identifiers{ "std", }, @@ -83467,7 +85071,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5926, + context: p7530, freeVariables: Identifiers{ "n__", "std", @@ -83487,7 +85091,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5926, + context: p7530, freeVariables: Identifiers{ "n__", "std", @@ -83507,7 +85111,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5926, + context: p7530, freeVariables: Identifiers{ "std", }, @@ -83526,7 +85130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5926, + context: p7530, freeVariables: Identifiers{ "std", }, @@ -83572,7 +85176,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5937, + context: p7541, freeVariables: Identifiers{ "n__", "std", @@ -83592,7 +85196,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5937, + context: p7541, freeVariables: Identifiers{ "std", }, @@ -83611,7 +85215,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5937, + context: p7541, freeVariables: Identifiers{ "std", }, @@ -83657,7 +85261,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5946, + context: p7550, freeVariables: Identifiers{ "n__", }, @@ -83691,7 +85295,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5926, + context: p7530, freeVariables: Identifiers{ "std", }, @@ -83710,7 +85314,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5926, + context: p7530, freeVariables: Identifiers{ "std", }, @@ -83729,7 +85333,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5926, + context: p7530, freeVariables: Identifiers{ "std", }, @@ -83775,7 +85379,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5957, + context: p7561, freeVariables: nil, }, Value: float64(10), @@ -83811,7 +85415,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5913, + 
context: p7517, freeVariables: Identifiers{ "blank", "caps", @@ -83844,7 +85448,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5962, + context: p7566, freeVariables: Identifiers{ "caps", "exponent", @@ -83865,7 +85469,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5962, + context: p7566, freeVariables: Identifiers{ "caps", }, @@ -83884,7 +85488,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5962, + context: p7566, freeVariables: Identifiers{ "caps", }, @@ -83905,7 +85509,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5962, + context: p7566, freeVariables: nil, }, Value: "E", @@ -83926,7 +85530,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5962, + context: p7566, freeVariables: nil, }, Value: "e", @@ -83949,7 +85553,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5962, + context: p7566, freeVariables: Identifiers{ "exponent", "render_int", @@ -83969,7 +85573,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5962, + context: p7566, freeVariables: Identifiers{ "render_int", }, @@ -83992,7 +85596,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5976, + context: p7580, freeVariables: Identifiers{ "exponent", }, @@ -84013,7 +85617,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5976, + context: p7580, freeVariables: nil, }, Value: float64(3), @@ -84033,7 +85637,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5976, + context: p7580, freeVariables: nil, }, Value: float64(0), @@ -84053,7 +85657,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5976, + context: p7580, freeVariables: nil, }, Value: false, @@ -84072,7 +85676,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5976, + context: p7580, freeVariables: nil, }, Value: true, @@ -84091,7 +85695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5976, + context: p7580, freeVariables: nil, }, Value: float64(10), @@ -84111,7 +85715,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5976, + context: p7580, freeVariables: nil, }, Value: "", @@ -84142,7 +85746,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5913, + context: p7517, freeVariables: Identifiers{ "blank", "ensure_pt", @@ -84174,7 +85778,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5988, + context: p7592, freeVariables: Identifiers{ "exponent", "n__", @@ -84195,7 +85799,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5988, + context: p7592, freeVariables: Identifiers{ "n__", }, @@ -84217,7 +85821,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5988, + context: p7592, freeVariables: Identifiers{ "exponent", "std", @@ -84237,7 +85841,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5988, + context: p7592, freeVariables: Identifiers{ "std", }, @@ -84256,7 +85860,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5988, + context: p7592, freeVariables: Identifiers{ "std", }, @@ -84302,7 +85906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6001, + context: p7605, freeVariables: nil, }, Value: float64(10), @@ -84322,7 +85926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6001, + context: p7605, freeVariables: Identifiers{ "exponent", }, @@ -84353,7 +85957,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5913, + context: p7517, freeVariables: Identifiers{ "blank", "ensure_pt", @@ -84384,7 +85988,7 @@ var StdAst = &DesugaredObject{ }, file: 
p1, }, - context: p6008, + context: p7612, freeVariables: Identifiers{ "std", "suff", @@ -84405,7 +86009,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6008, + context: p7612, freeVariables: Identifiers{ "zero_pad", }, @@ -84427,7 +86031,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6008, + context: p7612, freeVariables: Identifiers{ "std", "suff", @@ -84447,7 +86051,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6008, + context: p7612, freeVariables: Identifiers{ "std", }, @@ -84466,7 +86070,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6008, + context: p7612, freeVariables: Identifiers{ "std", }, @@ -84512,7 +86116,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6021, + context: p7625, freeVariables: Identifiers{ "suff", }, @@ -84543,7 +86147,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5913, + context: p7517, freeVariables: Identifiers{ "blank", "ensure_pt", @@ -84570,7 +86174,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5913, + context: p7517, freeVariables: Identifiers{ "blank", "ensure_pt", @@ -84596,7 +86200,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5913, + context: p7517, freeVariables: Identifiers{ "render_float_dec", }, @@ -84619,7 +86223,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6031, + context: p7635, freeVariables: Identifiers{ "mantissa", }, @@ -84640,7 +86244,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6031, + context: p7635, freeVariables: Identifiers{ "zp2", }, @@ -84661,7 +86265,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6031, + context: p7635, freeVariables: Identifiers{ "blank", }, @@ -84682,7 +86286,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6031, + context: p7635, freeVariables: Identifiers{ "sign", }, @@ -84703,7 +86307,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6031, + context: p7635, freeVariables: Identifiers{ "ensure_pt", }, @@ -84724,7 +86328,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6031, + context: p7635, freeVariables: Identifiers{ "trailing", }, @@ -84745,7 +86349,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6031, + context: p7635, freeVariables: Identifiers{ "prec", }, @@ -84773,7 +86377,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p5913, + context: p7517, freeVariables: Identifiers{ "suff", }, @@ -84803,7 +86407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "pad_left", @@ -84833,7 +86437,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6051, + context: p7655, freeVariables: Identifiers{ "render_float_dec", "render_float_sci", @@ -84867,7 +86471,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", "fw", @@ -84898,7 +86502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6059, + context: p7663, freeVariables: Identifiers{ "code", }, @@ -84917,7 +86521,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6059, + context: p7663, freeVariables: Identifiers{ "code", }, @@ -84964,7 +86568,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -84996,7 +86600,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6068, + context: p7672, freeVariables: Identifiers{ "prec_or_null", 
"std", @@ -85122,7 +86726,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6068, + context: p7672, freeVariables: Identifiers{ "prec_or_null", }, @@ -85143,7 +86747,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6068, + context: p7672, freeVariables: nil, }, }, @@ -85168,7 +86772,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6068, + context: p7672, freeVariables: Identifiers{ "prec_or_null", }, @@ -85189,7 +86793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6068, + context: p7672, freeVariables: nil, }, Value: float64(6), @@ -85213,7 +86817,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -85246,7 +86850,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6090, + context: p7694, freeVariables: Identifiers{ "prec_or_null", "std", @@ -85372,7 +86976,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6090, + context: p7694, freeVariables: Identifiers{ "prec_or_null", }, @@ -85393,7 +86997,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6090, + context: p7694, freeVariables: nil, }, }, @@ -85418,7 +87022,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6090, + context: p7694, freeVariables: Identifiers{ "prec_or_null", }, @@ -85439,7 +87043,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6090, + context: p7694, freeVariables: nil, }, Value: float64(0), @@ -85463,7 +87067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -85496,7 +87100,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: Identifiers{ "cflags", "fw", @@ -85516,7 +87120,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: Identifiers{ "cflags", }, @@ -85535,7 +87139,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: Identifiers{ "cflags", }, @@ -85554,7 +87158,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: Identifiers{ "cflags", }, @@ -85599,7 +87203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: Identifiers{ "cflags", }, @@ -85619,7 +87223,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: Identifiers{ "cflags", }, @@ -85638,7 +87242,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: Identifiers{ "cflags", }, @@ -85684,7 +87288,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: Identifiers{ "fw", }, @@ -85705,7 +87309,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6112, + context: p7716, freeVariables: nil, }, Value: float64(0), @@ -85729,7 +87333,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -85844,7 +87448,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -85863,7 +87467,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -85907,7 +87511,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, 
}, Value: "s", @@ -85934,7 +87538,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -85954,7 +87558,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -85973,7 +87577,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -86019,7 +87623,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6156, + context: p7760, freeVariables: Identifiers{ "val", }, @@ -86046,7 +87650,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -86161,7 +87765,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -86180,7 +87784,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -86224,7 +87828,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "d", @@ -86251,7 +87855,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "i", @@ -86382,7 +87986,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -86402,7 +88006,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -86421,7 +88025,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -86467,7 +88071,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6195, + context: p7799, freeVariables: Identifiers{ "val", }, @@ -86494,7 +88098,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "number", @@ -86522,7 +88126,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -86543,7 +88147,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -86564,7 +88168,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -86583,7 +88187,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -86602,7 +88206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "Format required number at ", @@ -86624,7 +88228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -86647,7 +88251,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: ", got ", @@ -86670,7 +88274,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -86690,7 +88294,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -86709,7 +88313,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -86755,7 
+88359,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6219, + context: p7823, freeVariables: Identifiers{ "val", }, @@ -86784,7 +88388,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "iprec", @@ -86807,7 +88411,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "render_int", }, @@ -86830,7 +88434,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: Identifiers{ "val", }, @@ -86851,7 +88455,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: Identifiers{ "zp", }, @@ -86872,7 +88476,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: Identifiers{ "iprec", }, @@ -86893,7 +88497,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: Identifiers{ "cflags", }, @@ -86912,7 +88516,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: Identifiers{ "cflags", }, @@ -86956,7 +88560,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: Identifiers{ "cflags", }, @@ -86975,7 +88579,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: Identifiers{ "cflags", }, @@ -87019,7 +88623,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: nil, }, Value: float64(10), @@ -87039,7 +88643,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6227, + context: p7831, freeVariables: nil, }, Value: "", @@ -87067,7 +88671,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -87182,7 +88786,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -87201,7 +88805,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -87245,7 +88849,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "o", @@ -87272,7 +88876,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "i", @@ -87403,7 +89007,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -87423,7 +89027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -87442,7 +89046,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -87488,7 +89092,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6282, + context: p7886, freeVariables: Identifiers{ "val", }, @@ -87515,7 +89119,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "number", @@ -87543,7 +89147,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -87564,7 +89168,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -87585,7 +89189,7 @@ var StdAst 
= &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -87604,7 +89208,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -87623,7 +89227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "Format required number at ", @@ -87645,7 +89249,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -87668,7 +89272,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: ", got ", @@ -87691,7 +89295,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -87711,7 +89315,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -87730,7 +89334,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -87776,7 +89380,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6306, + context: p7910, freeVariables: Identifiers{ "val", }, @@ -87805,7 +89409,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "iprec", @@ -87831,7 +89435,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6312, + context: p7916, freeVariables: Identifiers{ "cflags", }, @@ -87850,7 +89454,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6312, + context: p7916, freeVariables: Identifiers{ "cflags", }, @@ -87869,7 +89473,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6312, + context: p7916, freeVariables: Identifiers{ "cflags", }, @@ -87913,7 +89517,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6312, + context: p7916, freeVariables: nil, }, Value: "0", @@ -87934,7 +89538,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6312, + context: p7916, freeVariables: nil, }, Value: "", @@ -87959,7 +89563,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "iprec", @@ -87983,7 +89587,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "render_int", }, @@ -88006,7 +89610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: Identifiers{ "val", }, @@ -88027,7 +89631,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: Identifiers{ "zp", }, @@ -88048,7 +89652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: Identifiers{ "iprec", }, @@ -88069,7 +89673,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: Identifiers{ "cflags", }, @@ -88088,7 +89692,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: Identifiers{ "cflags", }, @@ -88132,7 +89736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: Identifiers{ "cflags", }, @@ -88151,7 +89755,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: Identifiers{ "cflags", }, @@ -88195,7 +89799,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: nil, }, Value: float64(8), @@ -88215,7 +89819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6327, + context: p7931, freeVariables: Identifiers{ "zero_prefix", }, @@ -88244,7 +89848,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -88358,7 +89962,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -88377,7 +89981,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -88421,7 +90025,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "x", @@ -88448,7 +90052,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -88580,7 +90184,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -88600,7 +90204,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -88619,7 +90223,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -88665,7 +90269,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6383, + context: p7987, freeVariables: Identifiers{ "val", }, @@ -88692,7 +90296,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "number", @@ -88720,7 +90324,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -88741,7 +90345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -88762,7 +90366,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -88781,7 +90385,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -88800,7 +90404,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "Format required number at ", @@ -88822,7 +90426,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -88845,7 +90449,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: ", got ", @@ -88868,7 +90472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -88888,7 +90492,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -88907,7 +90511,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -88953,7 +90557,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6407, + context: p8011, freeVariables: Identifiers{ "val", }, @@ -88982,7 +90586,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -89006,7 +90610,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "render_hex", }, @@ -89029,7 +90633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "val", }, @@ -89050,7 +90654,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "zp", }, @@ -89071,7 +90675,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "iprec", }, @@ -89092,7 +90696,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "cflags", }, @@ -89111,7 +90715,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "cflags", }, @@ -89155,7 +90759,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "cflags", }, @@ -89174,7 +90778,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "cflags", }, @@ -89218,7 +90822,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "cflags", }, @@ -89237,7 +90841,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "cflags", }, @@ -89281,7 +90885,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "code", }, @@ -89300,7 +90904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6415, + context: p8019, freeVariables: Identifiers{ "code", }, @@ -89351,7 +90955,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -89463,7 +91067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -89482,7 +91086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -89526,7 +91130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "f", @@ -89553,7 +91157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "fpprec", @@ -89684,7 +91288,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -89704,7 +91308,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -89723,7 +91327,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -89769,7 +91373,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6478, + context: p8082, freeVariables: Identifiers{ "val", }, @@ -89796,7 +91400,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "number", @@ -89824,7 +91428,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -89845,7 +91449,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -89866,7 +91470,7 @@ var StdAst = &DesugaredObject{ 
}, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -89885,7 +91489,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -89904,7 +91508,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "Format required number at ", @@ -89926,7 +91530,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -89949,7 +91553,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: ", got ", @@ -89972,7 +91576,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -89992,7 +91596,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -90011,7 +91615,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -90057,7 +91661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6502, + context: p8106, freeVariables: Identifiers{ "val", }, @@ -90086,7 +91690,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "fpprec", @@ -90109,7 +91713,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "render_float_dec", }, @@ -90132,7 +91736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "val", }, @@ -90153,7 +91757,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "zp", }, @@ -90174,7 +91778,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "cflags", }, @@ -90193,7 +91797,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "cflags", }, @@ -90237,7 +91841,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "cflags", }, @@ -90256,7 +91860,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "cflags", }, @@ -90300,7 +91904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "cflags", }, @@ -90319,7 +91923,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "cflags", }, @@ -90363,7 +91967,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: nil, }, Value: true, @@ -90382,7 +91986,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6510, + context: p8114, freeVariables: Identifiers{ "fpprec", }, @@ -90410,7 +92014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -90522,7 +92126,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -90541,7 +92145,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -90585,7 +92189,7 @@ var StdAst = &DesugaredObject{ 
}, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "e", @@ -90612,7 +92216,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -90744,7 +92348,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -90764,7 +92368,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -90783,7 +92387,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -90829,7 +92433,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6569, + context: p8173, freeVariables: Identifiers{ "val", }, @@ -90856,7 +92460,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "number", @@ -90884,7 +92488,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -90905,7 +92509,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -90926,7 +92530,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -90945,7 +92549,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -90964,7 +92568,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "Format required number at ", @@ -90986,7 +92590,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -91009,7 +92613,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: ", got ", @@ -91032,7 +92636,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -91052,7 +92656,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -91071,7 +92675,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -91117,7 +92721,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6593, + context: p8197, freeVariables: Identifiers{ "val", }, @@ -91146,7 +92750,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -91170,7 +92774,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "render_float_sci", }, @@ -91193,7 +92797,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "val", }, @@ -91214,7 +92818,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "zp", }, @@ -91235,7 +92839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "cflags", }, @@ -91254,7 +92858,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "cflags", }, @@ -91298,7 +92902,7 @@ var StdAst = &DesugaredObject{ }, file: 
p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "cflags", }, @@ -91317,7 +92921,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "cflags", }, @@ -91361,7 +92965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "cflags", }, @@ -91380,7 +92984,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "cflags", }, @@ -91424,7 +93028,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: nil, }, Value: true, @@ -91443,7 +93047,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "code", }, @@ -91462,7 +93066,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "code", }, @@ -91506,7 +93110,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6601, + context: p8205, freeVariables: Identifiers{ "fpprec", }, @@ -91534,7 +93138,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -91646,7 +93250,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -91665,7 +93269,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -91709,7 +93313,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "g", @@ -91736,7 +93340,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -91869,7 +93473,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -91889,7 +93493,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -91908,7 +93512,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -91954,7 +93558,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6665, + context: p8269, freeVariables: Identifiers{ "val", }, @@ -91981,7 +93585,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "number", @@ -92009,7 +93613,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -92030,7 +93634,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", "std", @@ -92051,7 +93655,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -92070,7 +93674,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -92089,7 +93693,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "Format required number at ", @@ -92111,7 +93715,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "i", }, @@ -92134,7 +93738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: 
p6055, + context: p7659, freeVariables: nil, }, Value: ", got ", @@ -92157,7 +93761,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -92177,7 +93781,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -92196,7 +93800,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -92242,7 +93846,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6689, + context: p8293, freeVariables: Identifiers{ "val", }, @@ -92271,7 +93875,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -92300,7 +93904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6695, + context: p8299, freeVariables: Identifiers{ "std", "val", @@ -92320,7 +93924,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6695, + context: p8299, freeVariables: Identifiers{ "std", }, @@ -92339,7 +93943,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6695, + context: p8299, freeVariables: Identifiers{ "std", }, @@ -92385,7 +93989,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6704, + context: p8308, freeVariables: Identifiers{ "std", "val", @@ -92405,7 +94009,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6704, + context: p8308, freeVariables: Identifiers{ "std", "val", @@ -92425,7 +94029,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6704, + context: p8308, freeVariables: Identifiers{ "std", }, @@ -92444,7 +94048,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6704, + context: p8308, freeVariables: Identifiers{ "std", }, @@ -92490,7 +94094,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6715, + context: p8319, freeVariables: Identifiers{ "std", "val", @@ -92510,7 +94114,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6715, + context: p8319, freeVariables: Identifiers{ "std", }, @@ -92529,7 +94133,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6715, + context: p8319, freeVariables: Identifiers{ "std", }, @@ -92575,7 +94179,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6724, + context: p8328, freeVariables: Identifiers{ "val", }, @@ -92609,7 +94213,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6704, + context: p8308, freeVariables: Identifiers{ "std", }, @@ -92628,7 +94232,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6704, + context: p8308, freeVariables: Identifiers{ "std", }, @@ -92647,7 +94251,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6704, + context: p8308, freeVariables: Identifiers{ "std", }, @@ -92693,7 +94297,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6735, + context: p8339, freeVariables: nil, }, Value: float64(10), @@ -92729,7 +94333,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -92756,7 +94360,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "exponent", "fpprec", @@ -92776,7 +94380,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "exponent", }, @@ -92795,7 +94399,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + 
context: p7659, freeVariables: Identifiers{ "exponent", }, @@ -92817,7 +94421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Op: UnaryOp(3), @@ -92835,7 +94439,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: float64(4), @@ -92858,7 +94462,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "exponent", "fpprec", @@ -92878,7 +94482,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "exponent", }, @@ -92900,7 +94504,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "fpprec", }, @@ -92923,7 +94527,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "code", @@ -92947,7 +94551,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "render_float_sci", }, @@ -92970,7 +94574,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "val", }, @@ -92991,7 +94595,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "zp", }, @@ -93012,7 +94616,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "cflags", }, @@ -93031,7 +94635,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "cflags", }, @@ -93075,7 +94679,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "cflags", }, @@ -93094,7 +94698,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "cflags", }, @@ -93138,7 +94742,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "cflags", }, @@ -93157,7 +94761,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "cflags", }, @@ -93201,7 +94805,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "cflags", }, @@ -93220,7 +94824,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "cflags", }, @@ -93264,7 +94868,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "code", }, @@ -93283,7 +94887,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "code", }, @@ -93327,7 +94931,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "fpprec", }, @@ -93346,7 +94950,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: Identifiers{ "fpprec", }, @@ -93368,7 +94972,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6758, + context: p8362, freeVariables: nil, }, Value: float64(1), @@ -93395,7 +94999,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "exponent", @@ -93423,7 +95027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, 
}, - context: p6796, + context: p8400, freeVariables: Identifiers{ "exponent", "std", @@ -93443,7 +95047,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6796, + context: p8400, freeVariables: Identifiers{ "std", }, @@ -93462,7 +95066,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6796, + context: p8400, freeVariables: Identifiers{ "std", }, @@ -93508,7 +95112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6805, + context: p8409, freeVariables: nil, }, Value: float64(1), @@ -93528,7 +95132,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6805, + context: p8409, freeVariables: Identifiers{ "exponent", }, @@ -93547,7 +95151,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6805, + context: p8409, freeVariables: Identifiers{ "exponent", }, @@ -93569,7 +95173,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6805, + context: p8409, freeVariables: nil, }, Value: float64(1), @@ -93599,7 +95203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "cflags", "digits_before_pt", @@ -93623,7 +95227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "render_float_dec", }, @@ -93646,7 +95250,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "val", }, @@ -93667,7 +95271,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "zp", }, @@ -93688,7 +95292,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "cflags", }, @@ -93707,7 +95311,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "cflags", }, @@ -93751,7 +95355,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "cflags", }, @@ -93770,7 +95374,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "cflags", }, @@ -93814,7 +95418,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "cflags", }, @@ -93833,7 +95437,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "cflags", }, @@ -93877,7 +95481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "cflags", }, @@ -93896,7 +95500,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "cflags", }, @@ -93940,7 +95544,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "digits_before_pt", "fpprec", @@ -93960,7 +95564,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "fpprec", }, @@ -93982,7 +95586,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6817, + context: p8421, freeVariables: Identifiers{ "digits_before_pt", }, @@ -94014,7 +95618,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", "std", @@ -94120,7 +95724,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -94139,7 
+95743,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -94183,7 +95787,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "c", @@ -94210,7 +95814,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -94315,7 +95919,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -94335,7 +95939,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -94354,7 +95958,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -94400,7 +96004,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6882, + context: p8486, freeVariables: Identifiers{ "val", }, @@ -94427,7 +96031,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "number", @@ -94454,7 +96058,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -94474,7 +96078,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -94493,7 +96097,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -94539,7 +96143,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6894, + context: p8498, freeVariables: Identifiers{ "val", }, @@ -94566,7 +96170,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -94671,7 +96275,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -94691,7 +96295,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -94710,7 +96314,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -94756,7 +96360,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6915, + context: p8519, freeVariables: Identifiers{ "val", }, @@ -94783,7 +96387,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "string", @@ -94810,7 +96414,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -94915,7 +96519,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -94935,7 +96539,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -94954,7 +96558,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -95000,7 +96604,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6937, + context: p8541, freeVariables: Identifiers{ "val", }, @@ -95027,7 +96631,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: float64(1), @@ -95053,7 +96657,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "val", }, @@ -95074,7 +96678,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -95094,7 +96698,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -95114,7 +96718,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "%c expected 1-sized string got: ", @@ -95136,7 +96740,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -95156,7 +96760,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -95175,7 +96779,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -95221,7 +96825,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6956, + context: p8560, freeVariables: Identifiers{ "val", }, @@ -95251,7 +96855,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -95271,7 +96875,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -95291,7 +96895,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "%c expected number / string, got: ", @@ -95313,7 +96917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", "val", @@ -95333,7 +96937,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -95352,7 +96956,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "std", }, @@ -95398,7 +97002,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6972, + context: p8576, freeVariables: Identifiers{ "val", }, @@ -95429,7 +97033,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -95448,7 +97052,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -95467,7 +97071,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: nil, }, Value: "Unknown code: ", @@ -95489,7 +97093,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -95508,7 +97112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6055, + context: p7659, freeVariables: Identifiers{ "code", }, @@ -95570,7 +97174,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "format_code", @@ -95597,7 +97201,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6988, + context: p8592, freeVariables: Identifiers{ "format_code", "format_codes_arr", @@ -95631,7 +97235,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "codes", @@ -95659,7 +97263,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, 
freeVariables: Identifiers{ "codes", "i", @@ -95680,7 +97284,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "i", }, @@ -95702,7 +97306,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "codes", "std", @@ -95722,7 +97326,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "std", }, @@ -95741,7 +97345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "std", }, @@ -95787,7 +97391,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7007, + context: p8611, freeVariables: Identifiers{ "codes", }, @@ -95815,7 +97419,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "j", @@ -95837,7 +97441,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "j", @@ -95858,7 +97462,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "j", }, @@ -95880,7 +97484,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "std", @@ -95900,7 +97504,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "std", }, @@ -95919,7 +97523,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "std", }, @@ -95965,7 +97569,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7024, + context: p8628, freeVariables: Identifiers{ "arr", }, @@ -95993,7 +97597,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "j", @@ -96014,7 +97618,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "j", @@ -96035,7 +97639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "std", @@ -96055,7 +97659,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "std", @@ -96075,7 +97679,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: nil, }, Value: "Too many values to format: ", @@ -96097,7 +97701,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "std", @@ -96117,7 +97721,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "std", }, @@ -96136,7 +97740,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "std", }, @@ -96182,7 +97786,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7044, + context: p8648, freeVariables: Identifiers{ "arr", }, @@ -96211,7 +97815,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: nil, }, Value: ", expected ", @@ -96234,7 +97838,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "j", }, @@ -96257,7 +97861,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: 
Identifiers{ "v", }, @@ -96279,7 +97883,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "codes", @@ -96310,7 +97914,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7055, + context: p8659, freeVariables: Identifiers{ "codes", "i", @@ -96330,7 +97934,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7055, + context: p8659, freeVariables: Identifiers{ "codes", }, @@ -96351,7 +97955,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7055, + context: p8659, freeVariables: Identifiers{ "i", }, @@ -96377,7 +97981,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -96491,7 +98095,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "code", "std", @@ -96511,7 +98115,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "std", }, @@ -96530,7 +98134,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "std", }, @@ -96576,7 +98180,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7080, + context: p8684, freeVariables: Identifiers{ "code", }, @@ -96603,7 +98207,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: nil, }, Value: "string", @@ -96630,7 +98234,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -96655,7 +98259,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "format_codes_arr", }, @@ -96678,7 +98282,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: Identifiers{ "codes", }, @@ -96699,7 +98303,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: Identifiers{ "arr", }, @@ -96720,7 +98324,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: Identifiers{ "i", }, @@ -96739,7 +98343,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: Identifiers{ "i", }, @@ -96761,7 +98365,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: nil, }, Value: float64(1), @@ -96782,7 +98386,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: Identifiers{ "j", }, @@ -96803,7 +98407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: Identifiers{ "code", "v", @@ -96823,7 +98427,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: Identifiers{ "v", }, @@ -96845,7 +98449,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7089, + context: p8693, freeVariables: Identifiers{ "code", }, @@ -96873,7 +98477,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -96905,7 +98509,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7110, + context: p8714, freeVariables: Identifiers{ "arr", "code", @@ -97012,7 +98616,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7110, + context: p8714, freeVariables: Identifiers{ "code", }, 
@@ -97031,7 +98635,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7110, + context: p8714, freeVariables: Identifiers{ "code", }, @@ -97075,7 +98679,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7110, + context: p8714, freeVariables: nil, }, Value: "*", @@ -97102,7 +98706,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7110, + context: p8714, freeVariables: Identifiers{ "arr", "j", @@ -97148,7 +98752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "j", }, @@ -97167,7 +98771,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "j", }, @@ -97189,7 +98793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: nil, }, Value: float64(1), @@ -97235,7 +98839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "arr", "j", @@ -97256,7 +98860,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "arr", "j", @@ -97277,7 +98881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "j", }, @@ -97299,7 +98903,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "arr", "std", @@ -97319,7 +98923,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "std", }, @@ -97338,7 +98942,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "std", }, @@ -97384,7 +98988,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7152, + context: p8756, freeVariables: Identifiers{ "arr", }, @@ -97412,7 +99016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "arr", "std", @@ -97432,7 +99036,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "arr", "std", @@ -97452,7 +99056,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: nil, }, Value: "Not enough values to format: ", @@ -97474,7 +99078,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "arr", "std", @@ -97494,7 +99098,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "std", }, @@ -97513,7 +99117,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "std", }, @@ -97559,7 +99163,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7168, + context: p8772, freeVariables: Identifiers{ "arr", }, @@ -97588,7 +99192,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "arr", "j", @@ -97608,7 +99212,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "arr", }, @@ -97629,7 +99233,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7131, + context: p8735, freeVariables: Identifiers{ "j", }, @@ -97657,7 +99261,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7110, + context: p8714, freeVariables: Identifiers{ "code", "j", @@ -97702,7 +99306,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p7181, + context: p8785, freeVariables: Identifiers{ "j", }, @@ -97748,7 +99352,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7181, + context: p8785, freeVariables: Identifiers{ "code", }, @@ -97767,7 +99371,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7181, + context: p8785, freeVariables: Identifiers{ "code", }, @@ -97819,7 +99423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -97851,7 +99455,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7193, + context: p8797, freeVariables: Identifiers{ "arr", "code", @@ -97958,7 +99562,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7193, + context: p8797, freeVariables: Identifiers{ "code", }, @@ -97977,7 +99581,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7193, + context: p8797, freeVariables: Identifiers{ "code", }, @@ -98021,7 +99625,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7193, + context: p8797, freeVariables: nil, }, Value: "*", @@ -98048,7 +99652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7193, + context: p8797, freeVariables: Identifiers{ "arr", "std", @@ -98094,7 +99698,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "tmp", }, @@ -98113,7 +99717,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "tmp", }, @@ -98132,7 +99736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "tmp", }, @@ -98177,7 +99781,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: nil, }, Value: float64(1), @@ -98223,7 +99827,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "arr", "std", @@ -98244,7 +99848,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "arr", "std", @@ -98265,7 +99869,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "tmp", }, @@ -98284,7 +99888,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "tmp", }, @@ -98329,7 +99933,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "arr", "std", @@ -98349,7 +99953,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "std", }, @@ -98368,7 +99972,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "std", }, @@ -98414,7 +100018,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7241, + context: p8845, freeVariables: Identifiers{ "arr", }, @@ -98442,7 +100046,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "arr", "std", @@ -98462,7 +100066,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "arr", "std", @@ -98482,7 +100086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: nil, }, Value: "Not enough values to format: ", @@ -98504,7 +100108,7 @@ var StdAst 
= &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "arr", "std", @@ -98524,7 +100128,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "std", }, @@ -98543,7 +100147,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "std", }, @@ -98589,7 +100193,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7257, + context: p8861, freeVariables: Identifiers{ "arr", }, @@ -98618,7 +100222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "arr", "tmp", @@ -98638,7 +100242,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "arr", }, @@ -98659,7 +100263,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "tmp", }, @@ -98678,7 +100282,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7214, + context: p8818, freeVariables: Identifiers{ "tmp", }, @@ -98729,7 +100333,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7193, + context: p8797, freeVariables: Identifiers{ "code", "tmp", @@ -98774,7 +100378,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7273, + context: p8877, freeVariables: Identifiers{ "tmp", }, @@ -98793,7 +100397,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7273, + context: p8877, freeVariables: Identifiers{ "tmp", }, @@ -98862,7 +100466,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7273, + context: p8877, freeVariables: Identifiers{ "code", }, @@ -98881,7 +100485,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7273, + context: p8877, freeVariables: Identifiers{ "code", }, @@ -98933,7 +100537,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -98966,7 +100570,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7288, + context: p8892, freeVariables: Identifiers{ "tmp2", }, @@ -98985,7 +100589,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7288, + context: p8892, freeVariables: Identifiers{ "tmp2", }, @@ -99032,7 +100636,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -99066,7 +100670,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "arr", "j2", @@ -99087,7 +100691,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "arr", "j2", @@ -99108,7 +100712,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "j2", }, @@ -99130,7 +100734,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "arr", "std", @@ -99150,7 +100754,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "std", }, @@ -99169,7 +100773,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "std", }, @@ -99215,7 +100819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7312, + context: p8916, freeVariables: Identifiers{ "arr", }, @@ -99243,7 +100847,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "arr", "j2", @@ -99263,7 +100867,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "arr", }, @@ -99284,7 +100888,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "j2", }, @@ -99307,7 +100911,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "arr", "std", @@ -99327,7 +100931,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "arr", "std", @@ -99347,7 +100951,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: nil, }, Value: "Not enough values to format, got ", @@ -99369,7 +100973,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "arr", "std", @@ -99389,7 +100993,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "std", }, @@ -99408,7 +101012,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7297, + context: p8901, freeVariables: Identifiers{ "std", }, @@ -99454,7 +101058,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7334, + context: p8938, freeVariables: Identifiers{ "arr", }, @@ -99487,7 +101091,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -99522,7 +101126,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7340, + context: p8944, freeVariables: Identifiers{ "code", "format_code", @@ -99632,7 +101236,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7340, + context: p8944, freeVariables: Identifiers{ "code", }, @@ -99651,7 +101255,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7340, + context: p8944, freeVariables: Identifiers{ "code", }, @@ -99695,7 +101299,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7340, + context: p8944, freeVariables: nil, }, Value: "%", @@ -99722,7 +101326,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7340, + context: p8944, freeVariables: nil, }, Value: "%", @@ -99743,7 +101347,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7340, + context: p8944, freeVariables: Identifiers{ "code", "format_code", @@ -99767,7 +101371,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7340, + context: p8944, freeVariables: Identifiers{ "format_code", }, @@ -99790,7 +101394,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7363, + context: p8967, freeVariables: Identifiers{ "val", }, @@ -99811,7 +101415,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7363, + context: p8967, freeVariables: Identifiers{ "code", }, @@ -99832,7 +101436,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7363, + context: p8967, freeVariables: Identifiers{ "tmp", }, @@ -99851,7 +101455,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7363, + context: p8967, freeVariables: Identifiers{ "tmp", }, @@ -99895,7 +101499,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7363, + context: p8967, freeVariables: Identifiers{ "tmp2", }, @@ -99914,7 +101518,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7363, + context: p8967, freeVariables: Identifiers{ "tmp2", }, @@ 
-99958,7 +101562,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7363, + context: p8967, freeVariables: Identifiers{ "j2", }, @@ -99989,7 +101593,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -100022,7 +101626,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7383, + context: p8987, freeVariables: Identifiers{ "code", "pad_left", @@ -100045,7 +101649,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7383, + context: p8987, freeVariables: Identifiers{ "code", }, @@ -100064,7 +101668,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7383, + context: p8987, freeVariables: Identifiers{ "code", }, @@ -100083,7 +101687,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7383, + context: p8987, freeVariables: Identifiers{ "code", }, @@ -100150,7 +101754,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7383, + context: p8987, freeVariables: Identifiers{ "pad_right", "s", @@ -100171,7 +101775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7383, + context: p8987, freeVariables: Identifiers{ "pad_right", }, @@ -100194,7 +101798,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7399, + context: p9003, freeVariables: Identifiers{ "s", }, @@ -100215,7 +101819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7399, + context: p9003, freeVariables: Identifiers{ "tmp", }, @@ -100234,7 +101838,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7399, + context: p9003, freeVariables: Identifiers{ "tmp", }, @@ -100278,7 +101882,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7399, + context: p9003, freeVariables: nil, }, Value: " ", @@ -100305,7 +101909,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7383, + context: p8987, freeVariables: Identifiers{ "pad_left", "s", @@ -100326,7 +101930,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7383, + context: p8987, freeVariables: Identifiers{ "pad_left", }, @@ -100349,7 +101953,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7413, + context: p9017, freeVariables: Identifiers{ "s", }, @@ -100370,7 +101974,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7413, + context: p9017, freeVariables: Identifiers{ "tmp", }, @@ -100389,7 +101993,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7413, + context: p9017, freeVariables: Identifiers{ "tmp", }, @@ -100433,7 +102037,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7413, + context: p9017, freeVariables: nil, }, Value: " ", @@ -100464,7 +102068,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "code", @@ -100494,7 +102098,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7425, + context: p9029, freeVariables: Identifiers{ "code", "j2", @@ -100600,7 +102204,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7425, + context: p9029, freeVariables: Identifiers{ "code", }, @@ -100619,7 +102223,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7425, + context: p9029, freeVariables: Identifiers{ "code", }, @@ -100663,7 +102267,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7425, + context: p9029, freeVariables: nil, }, Value: "%", @@ -100690,7 +102294,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7425, + context: p9029, freeVariables: Identifiers{ 
"j2", }, @@ -100711,7 +102315,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7425, + context: p9029, freeVariables: Identifiers{ "j2", }, @@ -100730,7 +102334,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7425, + context: p9029, freeVariables: Identifiers{ "j2", }, @@ -100752,7 +102356,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7425, + context: p9029, freeVariables: nil, }, Value: float64(1), @@ -100777,7 +102381,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "arr", "codes", @@ -100802,7 +102406,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p6992, + context: p8596, freeVariables: Identifiers{ "format_codes_arr", }, @@ -100825,7 +102429,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: Identifiers{ "codes", }, @@ -100846,7 +102450,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: Identifiers{ "arr", }, @@ -100867,7 +102471,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: Identifiers{ "i", }, @@ -100886,7 +102490,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: Identifiers{ "i", }, @@ -100908,7 +102512,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: nil, }, Value: float64(1), @@ -100929,7 +102533,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: Identifiers{ "j3", }, @@ -100950,7 +102554,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: Identifiers{ "s_padded", "v", @@ -100970,7 +102574,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: Identifiers{ "v", }, @@ -100992,7 +102596,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7454, + context: p9058, freeVariables: Identifiers{ "s_padded", }, @@ -101034,7 +102638,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "format_code", @@ -101062,7 +102666,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7475, + context: p9079, freeVariables: Identifiers{ "format_code", "format_codes_obj", @@ -101095,7 +102699,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "codes", "format_code", @@ -101122,7 +102726,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "codes", "i", @@ -101143,7 +102747,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "i", }, @@ -101165,7 +102769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "codes", "std", @@ -101185,7 +102789,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "std", }, @@ -101204,7 +102808,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "std", }, @@ -101250,7 +102854,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7494, + context: p9098, freeVariables: Identifiers{ "codes", }, @@ -101278,7 +102882,7 @@ var StdAst = &DesugaredObject{ }, file: p1, 
}, - context: p7479, + context: p9083, freeVariables: Identifiers{ "v", }, @@ -101299,7 +102903,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "codes", "format_code", @@ -101329,7 +102933,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7502, + context: p9106, freeVariables: Identifiers{ "codes", "i", @@ -101349,7 +102953,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7502, + context: p9106, freeVariables: Identifiers{ "codes", }, @@ -101370,7 +102974,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7502, + context: p9106, freeVariables: Identifiers{ "i", }, @@ -101396,7 +103000,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "codes", @@ -101509,7 +103113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "std", @@ -101529,7 +103133,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "std", }, @@ -101548,7 +103152,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "std", }, @@ -101594,7 +103198,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7527, + context: p9131, freeVariables: Identifiers{ "code", }, @@ -101621,7 +103225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: nil, }, Value: "string", @@ -101648,7 +103252,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "codes", @@ -101672,7 +103276,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "format_codes_obj", }, @@ -101695,7 +103299,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7536, + context: p9140, freeVariables: Identifiers{ "codes", }, @@ -101716,7 +103320,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7536, + context: p9140, freeVariables: Identifiers{ "obj", }, @@ -101737,7 +103341,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7536, + context: p9140, freeVariables: Identifiers{ "i", }, @@ -101756,7 +103360,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7536, + context: p9140, freeVariables: Identifiers{ "i", }, @@ -101778,7 +103382,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7536, + context: p9140, freeVariables: nil, }, Value: float64(1), @@ -101799,7 +103403,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7536, + context: p9140, freeVariables: Identifiers{ "code", "v", @@ -101819,7 +103423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7536, + context: p9140, freeVariables: Identifiers{ "v", }, @@ -101841,7 +103445,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7536, + context: p9140, freeVariables: Identifiers{ "code", }, @@ -101869,7 +103473,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "codes", @@ -101900,7 +103504,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7555, + context: p9159, freeVariables: Identifiers{ "code", "std", @@ -102005,7 +103609,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7555, + context: p9159, freeVariables: Identifiers{ "code", }, @@ -102024,7 +103628,7 @@ 
var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7555, + context: p9159, freeVariables: Identifiers{ "code", }, @@ -102068,7 +103672,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7555, + context: p9159, freeVariables: nil, }, }, @@ -102092,7 +103696,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7555, + context: p9159, freeVariables: nil, }, Expr: &LiteralString{ @@ -102109,7 +103713,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7555, + context: p9159, freeVariables: nil, }, Value: "Mapping keys required.", @@ -102131,7 +103735,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7555, + context: p9159, freeVariables: Identifiers{ "code", }, @@ -102150,7 +103754,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7555, + context: p9159, freeVariables: Identifiers{ "code", }, @@ -102198,7 +103802,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "codes", @@ -102230,7 +103834,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7582, + context: p9186, freeVariables: Identifiers{ "code", "std", @@ -102335,7 +103939,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7582, + context: p9186, freeVariables: Identifiers{ "code", }, @@ -102354,7 +103958,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7582, + context: p9186, freeVariables: Identifiers{ "code", }, @@ -102398,7 +104002,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7582, + context: p9186, freeVariables: nil, }, Value: "*", @@ -102425,7 +104029,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7582, + context: p9186, freeVariables: nil, }, Expr: &LiteralString{ @@ -102442,7 +104046,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7582, + context: p9186, freeVariables: nil, }, Value: "Cannot use * field width with object.", @@ -102464,7 +104068,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7582, + context: p9186, freeVariables: Identifiers{ "code", }, @@ -102483,7 +104087,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7582, + context: p9186, freeVariables: Identifiers{ "code", }, @@ -102531,7 +104135,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "codes", @@ -102564,7 +104168,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7609, + context: p9213, freeVariables: Identifiers{ "code", "std", @@ -102669,7 +104273,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7609, + context: p9213, freeVariables: Identifiers{ "code", }, @@ -102688,7 +104292,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7609, + context: p9213, freeVariables: Identifiers{ "code", }, @@ -102732,7 +104336,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7609, + context: p9213, freeVariables: nil, }, Value: "*", @@ -102759,7 +104363,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7609, + context: p9213, freeVariables: nil, }, Expr: &LiteralString{ @@ -102776,7 +104380,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7609, + context: p9213, freeVariables: nil, }, Value: "Cannot use * precision with object.", @@ -102798,7 +104402,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7609, + context: p9213, freeVariables: Identifiers{ "code", }, @@ -102817,7 +104421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p7609, + context: p9213, freeVariables: Identifiers{ "code", }, @@ -102865,7 +104469,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "codes", @@ -102899,7 +104503,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "f", "obj", @@ -102920,7 +104524,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "f", "obj", @@ -102941,7 +104545,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "std", }, @@ -102960,7 +104564,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "std", }, @@ -103006,7 +104610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7647, + context: p9251, freeVariables: Identifiers{ "obj", }, @@ -103027,7 +104631,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7647, + context: p9251, freeVariables: Identifiers{ "f", }, @@ -103054,7 +104658,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "f", "obj", @@ -103074,7 +104678,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "obj", }, @@ -103095,7 +104699,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "f", }, @@ -103118,7 +104722,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "f", }, @@ -103137,7 +104741,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "f", }, @@ -103156,7 +104760,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: nil, }, Value: "No such field: ", @@ -103178,7 +104782,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7636, + context: p9240, freeVariables: Identifiers{ "f", }, @@ -103205,7 +104809,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "codes", @@ -103240,7 +104844,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7668, + context: p9272, freeVariables: Identifiers{ "code", "f", @@ -103350,7 +104954,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7668, + context: p9272, freeVariables: Identifiers{ "code", }, @@ -103369,7 +104973,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7668, + context: p9272, freeVariables: Identifiers{ "code", }, @@ -103413,7 +105017,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7668, + context: p9272, freeVariables: nil, }, Value: "%", @@ -103440,7 +105044,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7668, + context: p9272, freeVariables: nil, }, Value: "%", @@ -103461,7 +105065,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7668, + context: p9272, freeVariables: Identifiers{ "code", "f", @@ -103485,7 +105089,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7668, + context: p9272, freeVariables: Identifiers{ "format_code", }, @@ -103508,7 +105112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7691, + context: p9295, freeVariables: Identifiers{ "val", }, @@ -103529,7 +105133,7 @@ var StdAst = &DesugaredObject{ }, file: p1, 
}, - context: p7691, + context: p9295, freeVariables: Identifiers{ "code", }, @@ -103550,7 +105154,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7691, + context: p9295, freeVariables: Identifiers{ "fw", }, @@ -103571,7 +105175,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7691, + context: p9295, freeVariables: Identifiers{ "prec", }, @@ -103592,7 +105196,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7691, + context: p9295, freeVariables: Identifiers{ "f", }, @@ -103623,7 +105227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "code", "codes", @@ -103654,7 +105258,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7705, + context: p9309, freeVariables: Identifiers{ "code", "fw", @@ -103677,7 +105281,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7705, + context: p9309, freeVariables: Identifiers{ "code", }, @@ -103696,7 +105300,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7705, + context: p9309, freeVariables: Identifiers{ "code", }, @@ -103715,7 +105319,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7705, + context: p9309, freeVariables: Identifiers{ "code", }, @@ -103782,7 +105386,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7705, + context: p9309, freeVariables: Identifiers{ "fw", "pad_right", @@ -103803,7 +105407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7705, + context: p9309, freeVariables: Identifiers{ "pad_right", }, @@ -103826,7 +105430,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7721, + context: p9325, freeVariables: Identifiers{ "s", }, @@ -103847,7 +105451,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7721, + context: p9325, freeVariables: Identifiers{ "fw", }, @@ -103868,7 +105472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7721, + context: p9325, freeVariables: nil, }, Value: " ", @@ -103895,7 +105499,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7705, + context: p9309, freeVariables: Identifiers{ "fw", "pad_left", @@ -103916,7 +105520,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7705, + context: p9309, freeVariables: Identifiers{ "pad_left", }, @@ -103939,7 +105543,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7732, + context: p9336, freeVariables: Identifiers{ "s", }, @@ -103960,7 +105564,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7732, + context: p9336, freeVariables: Identifiers{ "fw", }, @@ -103981,7 +105585,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7732, + context: p9336, freeVariables: nil, }, Value: " ", @@ -104012,7 +105616,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "codes", "format_codes_obj", @@ -104036,7 +105640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7479, + context: p9083, freeVariables: Identifiers{ "format_codes_obj", }, @@ -104059,7 +105663,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7743, + context: p9347, freeVariables: Identifiers{ "codes", }, @@ -104080,7 +105684,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7743, + context: p9347, freeVariables: Identifiers{ "obj", }, @@ -104101,7 +105705,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7743, + context: p9347, freeVariables: Identifiers{ "i", }, @@ -104120,7 +105724,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p7743, + context: p9347, freeVariables: Identifiers{ "i", }, @@ -104142,7 +105746,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7743, + context: p9347, freeVariables: nil, }, Value: float64(1), @@ -104163,7 +105767,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7743, + context: p9347, freeVariables: Identifiers{ "s_padded", "v", @@ -104183,7 +105787,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7743, + context: p9347, freeVariables: Identifiers{ "v", }, @@ -104205,7 +105809,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7743, + context: p9347, freeVariables: Identifiers{ "s_padded", }, @@ -104246,7 +105850,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "format_codes_arr", @@ -104354,7 +105958,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", "vals", @@ -104374,7 +105978,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", }, @@ -104393,7 +105997,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", }, @@ -104439,7 +106043,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7777, + context: p9381, freeVariables: Identifiers{ "vals", }, @@ -104466,7 +106070,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: nil, }, Value: "array", @@ -104493,7 +106097,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "format_codes_arr", @@ -104514,7 +106118,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "format_codes_arr", }, @@ -104537,7 +106141,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7786, + context: p9390, freeVariables: Identifiers{ "codes", }, @@ -104558,7 +106162,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7786, + context: p9390, freeVariables: Identifiers{ "vals", }, @@ -104579,7 +106183,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7786, + context: p9390, freeVariables: nil, }, Value: float64(0), @@ -104599,7 +106203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7786, + context: p9390, freeVariables: nil, }, Value: float64(0), @@ -104619,7 +106223,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7786, + context: p9390, freeVariables: nil, }, Value: "", @@ -104646,7 +106250,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "format_codes_arr", @@ -104754,7 +106358,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", "vals", @@ -104774,7 +106378,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", }, @@ -104793,7 +106397,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "std", }, @@ -104839,7 +106443,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7812, + context: p9416, freeVariables: Identifiers{ "vals", }, @@ -104866,7 +106470,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: 
p4840, freeVariables: nil, }, Value: "object", @@ -104893,7 +106497,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "format_codes_obj", @@ -104914,7 +106518,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "format_codes_obj", }, @@ -104937,7 +106541,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7821, + context: p9425, freeVariables: Identifiers{ "codes", }, @@ -104958,7 +106562,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7821, + context: p9425, freeVariables: Identifiers{ "vals", }, @@ -104979,7 +106583,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7821, + context: p9425, freeVariables: nil, }, Value: float64(0), @@ -104999,7 +106603,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7821, + context: p9425, freeVariables: nil, }, Value: "", @@ -105026,7 +106630,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "codes", "format_codes_arr", @@ -105047,7 +106651,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p3236, + context: p4840, freeVariables: Identifiers{ "format_codes_arr", }, @@ -105070,7 +106674,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7833, + context: p9437, freeVariables: Identifiers{ "codes", }, @@ -105091,7 +106695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7833, + context: p9437, freeVariables: Identifiers{ "vals", }, @@ -105111,7 +106715,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7839, + context: p9443, freeVariables: Identifiers{ "vals", }, @@ -105135,7 +106739,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7833, + context: p9437, freeVariables: nil, }, Value: float64(0), @@ -105155,7 +106759,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7833, + context: p9437, freeVariables: nil, }, Value: float64(0), @@ -105175,7 +106779,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7833, + context: p9437, freeVariables: nil, }, Value: "", @@ -105271,7 +106875,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9452, freeVariables: nil, }, }, @@ -105293,7 +106897,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9454, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -105535,7 +107139,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9478, freeVariables: Identifiers{ "base64_table", "i", @@ -105559,7 +107163,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9482, freeVariables: Identifiers{ "base64_table", "i", @@ -105579,7 +107183,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9485, freeVariables: Identifiers{ "base64_table", }, @@ -105600,7 +107204,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9488, freeVariables: Identifiers{ "i", }, @@ -105623,7 +107227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p9491, freeVariables: Identifiers{ "i", }, @@ -105652,7 +107256,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9494, freeVariables: Identifiers{ "std", }, @@ -105671,7 +107275,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9497, freeVariables: Identifiers{ "std", }, @@ 
-105690,7 +107294,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9500, freeVariables: Identifiers{ "std", }, @@ -105736,7 +107340,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p9505, freeVariables: nil, }, Value: float64(0), @@ -105756,14 +107360,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p9507, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -105819,7 +107423,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -105847,7 +107451,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7850, + context: p9513, freeVariables: Identifiers{ "arr", "func", @@ -105872,7 +107476,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7854, + context: p9517, freeVariables: Identifiers{ "aux", }, @@ -105901,7 +107505,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7858, + context: p9521, freeVariables: Identifiers{ "arr", "aux", @@ -105924,7 +107528,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7858, + context: p9521, freeVariables: Identifiers{ "idx", }, @@ -105943,7 +107547,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7858, + context: p9521, freeVariables: Identifiers{ "idx", }, @@ -105965,7 +107569,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7858, + context: p9521, freeVariables: nil, }, Value: float64(0), @@ -105986,7 +107590,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7858, + context: p9521, freeVariables: Identifiers{ "running", }, @@ -106007,7 +107611,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7858, + context: p9521, freeVariables: Identifiers{ "arr", "aux", @@ -106030,7 +107634,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7858, + context: p9521, freeVariables: Identifiers{ "aux", }, @@ -106053,7 +107657,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7873, + context: p9536, freeVariables: Identifiers{ "func", }, @@ -106074,7 +107678,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7873, + context: p9536, freeVariables: Identifiers{ "arr", }, @@ -106095,7 +107699,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7873, + context: p9536, freeVariables: Identifiers{ "arr", "func", @@ -106117,7 +107721,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7873, + context: p9536, freeVariables: Identifiers{ "func", }, @@ -106140,7 +107744,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7883, + context: p9546, freeVariables: Identifiers{ "arr", "idx", @@ -106160,7 +107764,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7883, + context: p9546, freeVariables: Identifiers{ "arr", }, @@ -106181,7 +107785,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7883, + context: p9546, freeVariables: Identifiers{ "idx", }, @@ -106204,7 +107808,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7883, + context: p9546, freeVariables: Identifiers{ "running", }, @@ -106231,7 +107835,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7873, + context: p9536, freeVariables: Identifiers{ "idx", }, @@ -106250,7 +107854,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7873, + context: p9536, freeVariables: Identifiers{ "idx", }, @@ -106272,7 +107876,7 @@ 
var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7873, + context: p9536, freeVariables: nil, }, Value: float64(1), @@ -106304,7 +107908,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7850, + context: p9513, freeVariables: Identifiers{ "arr", "aux", @@ -106327,7 +107931,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7850, + context: p9513, freeVariables: Identifiers{ "aux", }, @@ -106350,7 +107954,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7902, + context: p9565, freeVariables: Identifiers{ "func", }, @@ -106371,7 +107975,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7902, + context: p9565, freeVariables: Identifiers{ "arr", }, @@ -106392,7 +107996,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7902, + context: p9565, freeVariables: Identifiers{ "init", }, @@ -106413,7 +108017,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7902, + context: p9565, freeVariables: Identifiers{ "arr", "std", @@ -106433,7 +108037,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7902, + context: p9565, freeVariables: Identifiers{ "arr", "std", @@ -106453,7 +108057,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7902, + context: p9565, freeVariables: Identifiers{ "std", }, @@ -106472,7 +108076,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7902, + context: p9565, freeVariables: Identifiers{ "std", }, @@ -106518,7 +108122,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7919, + context: p9582, freeVariables: Identifiers{ "arr", }, @@ -106546,7 +108150,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7902, + context: p9565, freeVariables: nil, }, Value: float64(1), @@ -106621,7 +108225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9589, freeVariables: nil, }, }, @@ -106643,7 +108247,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9591, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -106885,7 +108489,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9615, freeVariables: Identifiers{ "base64_table", "i", @@ -106909,7 +108513,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9619, freeVariables: Identifiers{ "base64_table", "i", @@ -106929,7 +108533,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9622, freeVariables: Identifiers{ "base64_table", }, @@ -106950,7 +108554,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9625, freeVariables: Identifiers{ "i", }, @@ -106973,7 +108577,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p9628, freeVariables: Identifiers{ "i", }, @@ -107002,7 +108606,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9631, freeVariables: Identifiers{ "std", }, @@ -107021,7 +108625,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9634, freeVariables: Identifiers{ "std", }, @@ -107040,7 +108644,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9637, freeVariables: Identifiers{ "std", }, @@ -107086,7 +108690,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p9642, freeVariables: nil, }, Value: float64(0), @@ -107106,14 +108710,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p9644, freeVariables: nil, 
}, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -107169,7 +108773,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -107197,7 +108801,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7928, + context: p9650, freeVariables: Identifiers{ "arr", "func", @@ -107222,7 +108826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7932, + context: p9654, freeVariables: Identifiers{ "aux", "std", @@ -107252,7 +108856,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "arr", "aux", @@ -107276,7 +108880,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "arr", "idx", @@ -107297,7 +108901,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "idx", }, @@ -107319,7 +108923,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "arr", "std", @@ -107339,7 +108943,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "std", }, @@ -107358,7 +108962,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "std", }, @@ -107404,7 +109008,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7951, + context: p9673, freeVariables: Identifiers{ "arr", }, @@ -107432,7 +109036,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "running", }, @@ -107453,7 +109057,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "arr", "aux", @@ -107476,7 +109080,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7936, + context: p9658, freeVariables: Identifiers{ "aux", }, @@ -107499,7 +109103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7961, + context: p9683, freeVariables: Identifiers{ "func", }, @@ -107520,7 +109124,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7961, + context: p9683, freeVariables: Identifiers{ "arr", }, @@ -107541,7 +109145,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7961, + context: p9683, freeVariables: Identifiers{ "arr", "func", @@ -107563,7 +109167,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7961, + context: p9683, freeVariables: Identifiers{ "func", }, @@ -107586,7 +109190,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7971, + context: p9693, freeVariables: Identifiers{ "running", }, @@ -107607,7 +109211,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7971, + context: p9693, freeVariables: Identifiers{ "arr", "idx", @@ -107627,7 +109231,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7971, + context: p9693, freeVariables: Identifiers{ "arr", }, @@ -107648,7 +109252,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7971, + context: p9693, freeVariables: Identifiers{ "idx", }, @@ -107677,7 +109281,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7961, + context: p9683, freeVariables: Identifiers{ "idx", }, @@ -107696,7 +109300,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7961, + context: p9683, freeVariables: Identifiers{ "idx", 
}, @@ -107718,7 +109322,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7961, + context: p9683, freeVariables: nil, }, Value: float64(1), @@ -107750,7 +109354,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7928, + context: p9650, freeVariables: Identifiers{ "arr", "aux", @@ -107772,7 +109376,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7928, + context: p9650, freeVariables: Identifiers{ "aux", }, @@ -107795,7 +109399,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7990, + context: p9712, freeVariables: Identifiers{ "func", }, @@ -107816,7 +109420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7990, + context: p9712, freeVariables: Identifiers{ "arr", }, @@ -107837,7 +109441,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7990, + context: p9712, freeVariables: Identifiers{ "init", }, @@ -107858,7 +109462,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p7990, + context: p9712, freeVariables: nil, }, Value: float64(0), @@ -107932,7 +109536,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9723, freeVariables: nil, }, }, @@ -107954,7 +109558,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9725, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -108196,7 +109800,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9749, freeVariables: Identifiers{ "base64_table", "i", @@ -108220,7 +109824,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9753, freeVariables: Identifiers{ "base64_table", "i", @@ -108240,7 +109844,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9756, freeVariables: Identifiers{ "base64_table", }, @@ -108261,7 +109865,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9759, freeVariables: Identifiers{ "i", }, @@ -108284,7 +109888,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p9762, freeVariables: Identifiers{ "i", }, @@ -108313,7 +109917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9765, freeVariables: Identifiers{ "std", }, @@ -108332,7 +109936,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9768, freeVariables: Identifiers{ "std", }, @@ -108351,7 +109955,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9771, freeVariables: Identifiers{ "std", }, @@ -108397,7 +110001,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p9776, freeVariables: nil, }, Value: float64(0), @@ -108417,14 +110021,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p9778, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -108480,7 +110084,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -108508,7 +110112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "arr", "filter_func", @@ -108636,7 +110240,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "filter_func", "std", @@ -108656,7 +110260,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, 
freeVariables: Identifiers{ "std", }, @@ -108675,7 +110279,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -108721,7 +110325,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8024, + context: p9805, freeVariables: Identifiers{ "filter_func", }, @@ -108748,7 +110352,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: nil, }, Value: "function", @@ -108776,7 +110380,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "filter_func", "std", @@ -108796,7 +110400,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "filter_func", "std", @@ -108816,7 +110420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: nil, }, Value: "std.filterMap first param must be function, got ", @@ -108838,7 +110442,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "filter_func", "std", @@ -108858,7 +110462,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -108877,7 +110481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -108923,7 +110527,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8041, + context: p9822, freeVariables: Identifiers{ "filter_func", }, @@ -108952,7 +110556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "arr", "filter_func", @@ -109080,7 +110684,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "map_func", "std", @@ -109100,7 +110704,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109119,7 +110723,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109165,7 +110769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8064, + context: p9845, freeVariables: Identifiers{ "map_func", }, @@ -109192,7 +110796,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: nil, }, Value: "function", @@ -109220,7 +110824,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "map_func", "std", @@ -109240,7 +110844,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "map_func", "std", @@ -109260,7 +110864,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: nil, }, Value: "std.filterMap second param must be function, got ", @@ -109282,7 +110886,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "map_func", "std", @@ -109302,7 +110906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109321,7 +110925,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109367,7 +110971,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p8081, + context: p9862, freeVariables: Identifiers{ "map_func", }, @@ -109396,7 +111000,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "arr", "filter_func", @@ -109524,7 +111128,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "arr", "std", @@ -109544,7 +111148,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109563,7 +111167,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109609,7 +111213,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8104, + context: p9885, freeVariables: Identifiers{ "arr", }, @@ -109636,7 +111240,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: nil, }, Value: "array", @@ -109664,7 +111268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "arr", "std", @@ -109684,7 +111288,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "arr", "std", @@ -109704,7 +111308,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: nil, }, Value: "std.filterMap third param must be array, got ", @@ -109726,7 +111330,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "arr", "std", @@ -109746,7 +111350,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109765,7 +111369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109811,7 +111415,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8121, + context: p9902, freeVariables: Identifiers{ "arr", }, @@ -109840,7 +111444,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "arr", "filter_func", @@ -109862,7 +111466,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109881,7 +111485,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8003, + context: p9784, freeVariables: Identifiers{ "std", }, @@ -109927,7 +111531,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8132, + context: p9913, freeVariables: Identifiers{ "map_func", }, @@ -109948,7 +111552,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8132, + context: p9913, freeVariables: Identifiers{ "arr", "filter_func", @@ -109969,7 +111573,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8132, + context: p9913, freeVariables: Identifiers{ "std", }, @@ -109988,7 +111592,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8132, + context: p9913, freeVariables: Identifiers{ "std", }, @@ -110034,7 +111638,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8143, + context: p9924, freeVariables: Identifiers{ "filter_func", }, @@ -110055,7 +111659,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8143, + context: p9924, freeVariables: Identifiers{ "arr", }, @@ -110138,7 +111742,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9932, freeVariables: nil, }, }, @@ 
-110160,7 +111764,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9934, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -110402,7 +112006,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9958, freeVariables: Identifiers{ "base64_table", "i", @@ -110426,7 +112030,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9962, freeVariables: Identifiers{ "base64_table", "i", @@ -110446,7 +112050,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9965, freeVariables: Identifiers{ "base64_table", }, @@ -110467,7 +112071,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9968, freeVariables: Identifiers{ "i", }, @@ -110490,7 +112094,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p9971, freeVariables: Identifiers{ "i", }, @@ -110519,7 +112123,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9974, freeVariables: Identifiers{ "std", }, @@ -110538,7 +112142,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9977, freeVariables: Identifiers{ "std", }, @@ -110557,7 +112161,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p9980, freeVariables: Identifiers{ "std", }, @@ -110603,7 +112207,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p9985, freeVariables: nil, }, Value: float64(0), @@ -110623,14 +112227,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p9987, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -110686,7 +112290,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -110713,7 +112317,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "a", "b", @@ -110820,7 +112424,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "a", }, @@ -110841,7 +112445,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "b", }, @@ -110868,7 +112472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: nil, }, Value: true, @@ -110887,7 +112491,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "a", "b", @@ -110907,7 +112511,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "a", "b", @@ -110927,7 +112531,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "a", }, @@ -110946,7 +112550,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "a", }, @@ -110965,7 +112569,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: nil, }, Value: "Assertion failed. 
", @@ -110987,7 +112591,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "a", }, @@ -111010,7 +112614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: nil, }, Value: " != ", @@ -111033,7 +112637,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8153, + context: p9993, freeVariables: Identifiers{ "b", }, @@ -111104,7 +112708,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10026, freeVariables: nil, }, }, @@ -111126,7 +112730,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10028, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -111368,7 +112972,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10052, freeVariables: Identifiers{ "base64_table", "i", @@ -111392,7 +112996,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10056, freeVariables: Identifiers{ "base64_table", "i", @@ -111412,7 +113016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10059, freeVariables: Identifiers{ "base64_table", }, @@ -111433,7 +113037,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10062, freeVariables: Identifiers{ "i", }, @@ -111456,7 +113060,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p10065, freeVariables: Identifiers{ "i", }, @@ -111485,7 +113089,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10068, freeVariables: Identifiers{ "std", }, @@ -111504,7 +113108,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10071, freeVariables: Identifiers{ "std", }, @@ -111523,7 +113127,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10074, freeVariables: Identifiers{ "std", }, @@ -111569,7 +113173,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10079, freeVariables: nil, }, Value: float64(0), @@ -111589,14 +113193,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10081, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -111652,7 +113256,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -111678,7 +113282,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", "std", @@ -111804,7 +113408,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", "std", @@ -111824,7 +113428,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "std", }, @@ -111843,7 +113447,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "std", }, @@ -111889,7 +113493,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8209, + context: p10108, freeVariables: Identifiers{ "n", }, @@ -111916,7 +113520,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: nil, }, Value: "number", @@ -111944,7 +113548,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: 
Identifiers{ "n", "std", @@ -111964,7 +113568,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", "std", @@ -111984,7 +113588,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: nil, }, Value: "std.abs expected number, got ", @@ -112006,7 +113610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", "std", @@ -112026,7 +113630,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "std", }, @@ -112045,7 +113649,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "std", }, @@ -112091,7 +113695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8226, + context: p10125, freeVariables: Identifiers{ "n", }, @@ -112120,7 +113724,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", }, @@ -112139,7 +113743,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", }, @@ -112158,7 +113762,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", }, @@ -112180,7 +113784,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: nil, }, Value: float64(0), @@ -112201,7 +113805,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", }, @@ -112222,7 +113826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", }, @@ -112242,7 +113846,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8188, + context: p10087, freeVariables: Identifiers{ "n", }, @@ -112313,7 +113917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10144, freeVariables: nil, }, }, @@ -112335,7 +113939,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10146, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -112577,7 +114181,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10170, freeVariables: Identifiers{ "base64_table", "i", @@ -112601,7 +114205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10174, freeVariables: Identifiers{ "base64_table", "i", @@ -112621,7 +114225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10177, freeVariables: Identifiers{ "base64_table", }, @@ -112642,7 +114246,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10180, freeVariables: Identifiers{ "i", }, @@ -112665,7 +114269,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p10183, freeVariables: Identifiers{ "i", }, @@ -112694,7 +114298,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10186, freeVariables: Identifiers{ "std", }, @@ -112713,7 +114317,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10189, freeVariables: Identifiers{ "std", }, @@ -112732,7 +114336,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10192, freeVariables: Identifiers{ "std", }, @@ -112778,7 +114382,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p54, + context: p10197, freeVariables: nil, }, Value: float64(0), @@ -112798,14 +114402,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10199, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -112861,7 +114465,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -112887,7 +114491,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", "std", @@ -113013,7 +114617,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", "std", @@ -113033,7 +114637,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "std", }, @@ -113052,7 +114656,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "std", }, @@ -113098,7 +114702,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8268, + context: p10226, freeVariables: Identifiers{ "n", }, @@ -113125,7 +114729,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: nil, }, Value: "number", @@ -113153,7 +114757,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", "std", @@ -113173,7 +114777,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", "std", @@ -113193,7 +114797,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: nil, }, Value: "std.sign expected number, got ", @@ -113215,7 +114819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", "std", @@ -113235,7 +114839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "std", }, @@ -113254,7 +114858,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "std", }, @@ -113300,7 +114904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8285, + context: p10243, freeVariables: Identifiers{ "n", }, @@ -113329,7 +114933,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", }, @@ -113348,7 +114952,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", }, @@ -113367,7 +114971,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", }, @@ -113389,7 +114993,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: nil, }, Value: float64(0), @@ -113410,7 +115014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: nil, }, Value: float64(1), @@ -113430,7 +115034,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", }, @@ -113449,7 +115053,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", }, @@ -113468,7 +115072,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p8247, + context: p10205, freeVariables: Identifiers{ "n", }, @@ -113490,7 +115094,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: nil, }, Value: float64(0), @@ -113511,7 +115115,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: nil, }, Op: UnaryOp(3), @@ -113529,7 +115133,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: nil, }, Value: float64(1), @@ -113550,7 +115154,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8247, + context: p10205, freeVariables: nil, }, Value: float64(0), @@ -113620,7 +115224,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10267, freeVariables: nil, }, }, @@ -113642,7 +115246,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10269, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -113884,7 +115488,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10293, freeVariables: Identifiers{ "base64_table", "i", @@ -113908,7 +115512,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10297, freeVariables: Identifiers{ "base64_table", "i", @@ -113928,7 +115532,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10300, freeVariables: Identifiers{ "base64_table", }, @@ -113949,7 +115553,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10303, freeVariables: Identifiers{ "i", }, @@ -113972,7 +115576,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p10306, freeVariables: Identifiers{ "i", }, @@ -114001,7 +115605,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10309, freeVariables: Identifiers{ "std", }, @@ -114020,7 +115624,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10312, freeVariables: Identifiers{ "std", }, @@ -114039,7 +115643,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10315, freeVariables: Identifiers{ "std", }, @@ -114085,7 +115689,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10320, freeVariables: nil, }, Value: float64(0), @@ -114105,14 +115709,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10322, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -114168,7 +115772,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -114195,7 +115799,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", "b", @@ -114322,7 +115926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", "std", @@ -114342,7 +115946,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "std", }, @@ -114361,7 +115965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "std", }, @@ -114407,7 +116011,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8332, + context: p10349, freeVariables: Identifiers{ "a", }, @@ -114434,7 +116038,7 @@ var StdAst 
= &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: nil, }, Value: "number", @@ -114462,7 +116066,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", "std", @@ -114482,7 +116086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", "std", @@ -114502,7 +116106,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: nil, }, Value: "std.max first param expected number, got ", @@ -114524,7 +116128,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", "std", @@ -114544,7 +116148,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "std", }, @@ -114563,7 +116167,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "std", }, @@ -114609,7 +116213,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8349, + context: p10366, freeVariables: Identifiers{ "a", }, @@ -114638,7 +116242,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", "b", @@ -114765,7 +116369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "b", "std", @@ -114785,7 +116389,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "std", }, @@ -114804,7 +116408,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "std", }, @@ -114850,7 +116454,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8372, + context: p10389, freeVariables: Identifiers{ "b", }, @@ -114877,7 +116481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: nil, }, Value: "number", @@ -114905,7 +116509,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "b", "std", @@ -114925,7 +116529,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "b", "std", @@ -114945,7 +116549,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: nil, }, Value: "std.max second param expected number, got ", @@ -114967,7 +116571,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "b", "std", @@ -114987,7 +116591,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "std", }, @@ -115006,7 +116610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "std", }, @@ -115052,7 +116656,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8389, + context: p10406, freeVariables: Identifiers{ "b", }, @@ -115081,7 +116685,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", "b", @@ -115101,7 +116705,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", "b", @@ -115121,7 +116725,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + 
context: p10328, freeVariables: Identifiers{ "a", }, @@ -115143,7 +116747,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "b", }, @@ -115165,7 +116769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "a", }, @@ -115186,7 +116790,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8311, + context: p10328, freeVariables: Identifiers{ "b", }, @@ -115257,7 +116861,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10424, freeVariables: nil, }, }, @@ -115279,7 +116883,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10426, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -115521,7 +117125,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10450, freeVariables: Identifiers{ "base64_table", "i", @@ -115545,7 +117149,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10454, freeVariables: Identifiers{ "base64_table", "i", @@ -115565,7 +117169,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10457, freeVariables: Identifiers{ "base64_table", }, @@ -115586,7 +117190,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10460, freeVariables: Identifiers{ "i", }, @@ -115609,7 +117213,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p10463, freeVariables: Identifiers{ "i", }, @@ -115638,7 +117242,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10466, freeVariables: Identifiers{ "std", }, @@ -115657,7 +117261,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10469, freeVariables: Identifiers{ "std", }, @@ -115676,7 +117280,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10472, freeVariables: Identifiers{ "std", }, @@ -115722,7 +117326,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10477, freeVariables: nil, }, Value: float64(0), @@ -115742,14 +117346,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10479, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -115805,7 +117409,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -115832,7 +117436,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", "b", @@ -115959,7 +117563,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", "std", @@ -115979,7 +117583,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "std", }, @@ -115998,7 +117602,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "std", }, @@ -116044,7 +117648,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8430, + context: p10506, freeVariables: Identifiers{ "a", }, @@ -116071,7 +117675,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: nil, }, Value: "number", @@ -116099,7 +117703,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: 
p8409, + context: p10485, freeVariables: Identifiers{ "a", "std", @@ -116119,7 +117723,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", "std", @@ -116139,7 +117743,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: nil, }, Value: "std.max first param expected number, got ", @@ -116161,7 +117765,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", "std", @@ -116181,7 +117785,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "std", }, @@ -116200,7 +117804,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "std", }, @@ -116246,7 +117850,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8447, + context: p10523, freeVariables: Identifiers{ "a", }, @@ -116275,7 +117879,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", "b", @@ -116402,7 +118006,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "b", "std", @@ -116422,7 +118026,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "std", }, @@ -116441,7 +118045,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "std", }, @@ -116487,7 +118091,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8470, + context: p10546, freeVariables: Identifiers{ "b", }, @@ -116514,7 +118118,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: nil, }, Value: "number", @@ -116542,7 +118146,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "b", "std", @@ -116562,7 +118166,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "b", "std", @@ -116582,7 +118186,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: nil, }, Value: "std.max second param expected number, got ", @@ -116604,7 +118208,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "b", "std", @@ -116624,7 +118228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "std", }, @@ -116643,7 +118247,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "std", }, @@ -116689,7 +118293,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8487, + context: p10563, freeVariables: Identifiers{ "b", }, @@ -116718,7 +118322,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", "b", @@ -116738,7 +118342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", "b", @@ -116758,7 +118362,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", }, @@ -116780,7 +118384,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "b", }, @@ 
-116802,7 +118406,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "a", }, @@ -116823,7 +118427,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8409, + context: p10485, freeVariables: Identifiers{ "b", }, @@ -116894,7 +118498,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10581, freeVariables: nil, }, }, @@ -116916,7 +118520,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10583, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -117158,7 +118762,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10607, freeVariables: Identifiers{ "base64_table", "i", @@ -117182,7 +118786,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10611, freeVariables: Identifiers{ "base64_table", "i", @@ -117202,7 +118806,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10614, freeVariables: Identifiers{ "base64_table", }, @@ -117223,7 +118827,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10617, freeVariables: Identifiers{ "i", }, @@ -117246,7 +118850,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p10620, freeVariables: Identifiers{ "i", }, @@ -117275,7 +118879,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10623, freeVariables: Identifiers{ "std", }, @@ -117294,7 +118898,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10626, freeVariables: Identifiers{ "std", }, @@ -117313,7 +118917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10629, freeVariables: Identifiers{ "std", }, @@ -117359,7 +118963,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10634, freeVariables: nil, }, Value: float64(0), @@ -117379,14 +118983,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10636, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -117442,7 +119046,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -117468,7 +119072,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8507, + context: p10642, freeVariables: Identifiers{ "arrs", "std", @@ -117488,7 +119092,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8507, + context: p10642, freeVariables: Identifiers{ "std", }, @@ -117507,7 +119111,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8507, + context: p10642, freeVariables: Identifiers{ "std", }, @@ -117553,7 +119157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8516, + context: p10651, freeVariables: nil, }, Parameters: Parameters{ @@ -117578,7 +119182,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8519, + context: p10654, freeVariables: Identifiers{ "a", "b", @@ -117598,7 +119202,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8519, + context: p10654, freeVariables: Identifiers{ "a", }, @@ -117620,7 +119224,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8519, + context: p10654, freeVariables: Identifiers{ "b", }, @@ -117643,7 +119247,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8516, + context: p10651, freeVariables: 
Identifiers{ "arrs", }, @@ -117664,7 +119268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8516, + context: p10651, freeVariables: nil, }, Elements: nil, @@ -117737,7 +119341,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10667, freeVariables: nil, }, }, @@ -117759,7 +119363,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10669, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -118001,7 +119605,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10693, freeVariables: Identifiers{ "base64_table", "i", @@ -118025,7 +119629,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10697, freeVariables: Identifiers{ "base64_table", "i", @@ -118045,7 +119649,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10700, freeVariables: Identifiers{ "base64_table", }, @@ -118066,7 +119670,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10703, freeVariables: Identifiers{ "i", }, @@ -118089,7 +119693,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p10706, freeVariables: Identifiers{ "i", }, @@ -118118,7 +119722,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10709, freeVariables: Identifiers{ "std", }, @@ -118137,7 +119741,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10712, freeVariables: Identifiers{ "std", }, @@ -118156,7 +119760,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p10715, freeVariables: Identifiers{ "std", }, @@ -118202,7 +119806,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10720, freeVariables: nil, }, Value: float64(0), @@ -118222,14 +119826,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p10722, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -118285,7 +119889,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -118311,7 +119915,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8534, + context: p10728, freeVariables: Identifiers{ "ini", "std", @@ -118334,7 +119938,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8538, + context: p10732, freeVariables: Identifiers{ "std", }, @@ -118360,7 +119964,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8542, + context: p10736, freeVariables: Identifiers{ "body", "std", @@ -118380,7 +119984,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8542, + context: p10736, freeVariables: Identifiers{ "std", }, @@ -118399,7 +120003,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8542, + context: p10736, freeVariables: Identifiers{ "std", }, @@ -118445,7 +120049,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8551, + context: p10745, freeVariables: nil, }, Elements: nil, @@ -118599,7 +120203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8567, + context: p10761, freeVariables: Identifiers{ "body", "k", @@ -118623,7 +120227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8571, + context: p10765, freeVariables: Identifiers{ "body", "k", @@ -118643,7 +120247,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8571, + 
context: p10765, freeVariables: Identifiers{ "body", }, @@ -118664,7 +120268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8571, + context: p10765, freeVariables: Identifiers{ "k", }, @@ -118690,7 +120294,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8567, + context: p10761, freeVariables: Identifiers{ "k", "std", @@ -118796,7 +120400,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8567, + context: p10761, freeVariables: Identifiers{ "std", "value_or_values", @@ -118816,7 +120420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8567, + context: p10761, freeVariables: Identifiers{ "std", }, @@ -118835,7 +120439,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8567, + context: p10761, freeVariables: Identifiers{ "std", }, @@ -118881,7 +120485,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8596, + context: p10790, freeVariables: Identifiers{ "value_or_values", }, @@ -118908,7 +120512,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8567, + context: p10761, freeVariables: nil, }, Value: "array", @@ -119156,7 +120760,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8622, + context: p10816, freeVariables: nil, }, Value: "%s = %s", @@ -119177,7 +120781,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8622, + context: p10816, freeVariables: Identifiers{ "k", "value", @@ -119198,7 +120802,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8627, + context: p10821, freeVariables: Identifiers{ "k", }, @@ -119219,7 +120823,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8627, + context: p10821, freeVariables: Identifiers{ "value", }, @@ -119253,7 +120857,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8567, + context: p10761, freeVariables: Identifiers{ "value_or_values", }, @@ -119280,7 +120884,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8567, + context: p10761, freeVariables: Identifiers{ "k", "std", @@ -119388,7 +120992,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8645, + context: p10839, freeVariables: nil, }, Value: "%s = %s", @@ -119409,7 +121013,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8645, + context: p10839, freeVariables: Identifiers{ "k", "value_or_values", @@ -119430,7 +121034,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8650, + context: p10844, freeVariables: Identifiers{ "k", }, @@ -119451,7 +121055,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8650, + context: p10844, freeVariables: Identifiers{ "value_or_values", }, @@ -119490,7 +121094,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8551, + context: p10745, freeVariables: Identifiers{ "body", "std", @@ -119510,7 +121114,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8551, + context: p10745, freeVariables: Identifiers{ "std", }, @@ -119529,7 +121133,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8551, + context: p10745, freeVariables: Identifiers{ "std", }, @@ -119575,7 +121179,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8663, + context: p10857, freeVariables: Identifiers{ "body", }, @@ -119618,7 +121222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8534, + context: p10728, freeVariables: Identifiers{ "body_lines", "ini", @@ -119642,7 +121246,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8669, + context: p10863, freeVariables: Identifiers{ 
"body_lines", "std", @@ -119670,7 +121274,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8673, + context: p10867, freeVariables: Identifiers{ "body_lines", "sbody", @@ -119692,7 +121296,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8673, + context: p10867, freeVariables: Identifiers{ "sname", "std", @@ -119798,7 +121402,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8687, + context: p10881, freeVariables: nil, }, Value: "[%s]", @@ -119819,7 +121423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8687, + context: p10881, freeVariables: Identifiers{ "sname", }, @@ -119839,7 +121443,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8692, + context: p10886, freeVariables: Identifiers{ "sname", }, @@ -119873,7 +121477,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8673, + context: p10867, freeVariables: Identifiers{ "body_lines", "sbody", @@ -119893,7 +121497,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8673, + context: p10867, freeVariables: Identifiers{ "body_lines", }, @@ -119916,7 +121520,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8700, + context: p10894, freeVariables: Identifiers{ "sbody", }, @@ -119949,7 +121553,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8703, + context: p10897, freeVariables: Identifiers{ "body_lines", "ini", @@ -119970,7 +121574,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8703, + context: p10897, freeVariables: Identifiers{ "ini", "std", @@ -119990,7 +121594,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8703, + context: p10897, freeVariables: Identifiers{ "std", }, @@ -120009,7 +121613,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8703, + context: p10897, freeVariables: Identifiers{ "std", }, @@ -120055,7 +121659,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8714, + context: p10908, freeVariables: Identifiers{ "ini", }, @@ -120076,7 +121680,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8714, + context: p10908, freeVariables: nil, }, Value: "main", @@ -120103,7 +121707,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8703, + context: p10897, freeVariables: Identifiers{ "body_lines", "ini", @@ -120123,7 +121727,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8703, + context: p10897, freeVariables: Identifiers{ "body_lines", }, @@ -120146,7 +121750,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8723, + context: p10917, freeVariables: Identifiers{ "ini", }, @@ -120165,7 +121769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8723, + context: p10917, freeVariables: Identifiers{ "ini", }, @@ -120215,7 +121819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8703, + context: p10897, freeVariables: nil, }, Elements: nil, @@ -120375,7 +121979,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8744, + context: p10938, freeVariables: Identifiers{ "ini", "k", @@ -120396,7 +122000,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8744, + context: p10938, freeVariables: Identifiers{ "section_lines", }, @@ -120419,7 +122023,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8750, + context: p10944, freeVariables: Identifiers{ "k", }, @@ -120440,7 +122044,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8750, + context: p10944, freeVariables: Identifiers{ "ini", "k", @@ -120460,7 +122064,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p8750, + context: p10944, freeVariables: Identifiers{ "ini", }, @@ -120479,7 +122083,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8750, + context: p10944, freeVariables: Identifiers{ "ini", }, @@ -120523,7 +122127,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8750, + context: p10944, freeVariables: Identifiers{ "k", }, @@ -120556,7 +122160,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8762, + context: p10956, freeVariables: Identifiers{ "ini", "std", @@ -120576,7 +122180,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8762, + context: p10956, freeVariables: Identifiers{ "std", }, @@ -120595,7 +122199,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8762, + context: p10956, freeVariables: Identifiers{ "std", }, @@ -120641,7 +122245,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8771, + context: p10965, freeVariables: Identifiers{ "ini", }, @@ -120660,7 +122264,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8771, + context: p10965, freeVariables: Identifiers{ "ini", }, @@ -120719,7 +122323,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8534, + context: p10728, freeVariables: Identifiers{ "all_sections", "main_body", @@ -120740,7 +122344,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8534, + context: p10728, freeVariables: Identifiers{ "std", }, @@ -120759,7 +122363,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8534, + context: p10728, freeVariables: Identifiers{ "std", }, @@ -120805,7 +122409,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8785, + context: p10979, freeVariables: nil, }, Value: "\n", @@ -120826,7 +122430,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8785, + context: p10979, freeVariables: Identifiers{ "all_sections", "main_body", @@ -120847,7 +122451,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8785, + context: p10979, freeVariables: Identifiers{ "all_sections", "main_body", @@ -120868,7 +122472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8785, + context: p10979, freeVariables: Identifiers{ "main_body", }, @@ -120890,7 +122494,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8785, + context: p10979, freeVariables: Identifiers{ "all_sections", "std", @@ -120910,7 +122514,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8785, + context: p10979, freeVariables: Identifiers{ "std", }, @@ -120929,7 +122533,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8785, + context: p10979, freeVariables: Identifiers{ "std", }, @@ -120975,7 +122579,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8801, + context: p10995, freeVariables: Identifiers{ "all_sections", }, @@ -121004,7 +122608,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8785, + context: p10979, freeVariables: nil, }, Elements: Nodes{ @@ -121022,7 +122626,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8806, + context: p11000, freeVariables: nil, }, Value: "", @@ -121102,7 +122706,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11005, freeVariables: nil, }, }, @@ -121124,7 +122728,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11007, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -121366,7 +122970,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p9, + context: p11031, freeVariables: Identifiers{ "base64_table", "i", @@ -121390,7 +122994,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11035, freeVariables: Identifiers{ "base64_table", "i", @@ -121410,7 +123014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11038, freeVariables: Identifiers{ "base64_table", }, @@ -121431,7 +123035,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11041, freeVariables: Identifiers{ "i", }, @@ -121454,7 +123058,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p11044, freeVariables: Identifiers{ "i", }, @@ -121483,7 +123087,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11047, freeVariables: Identifiers{ "std", }, @@ -121502,7 +123106,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11050, freeVariables: Identifiers{ "std", }, @@ -121521,7 +123125,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11053, freeVariables: Identifiers{ "std", }, @@ -121567,7 +123171,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11058, freeVariables: nil, }, Value: float64(0), @@ -121587,14 +123191,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11060, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -121650,7 +123254,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -121676,7 +123280,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8813, + context: p11066, freeVariables: Identifiers{ "std", "str_", @@ -121699,7 +123303,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8817, + context: p11070, freeVariables: Identifiers{ "std", "str_", @@ -121719,7 +123323,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8817, + context: p11070, freeVariables: Identifiers{ "std", }, @@ -121738,7 +123342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8817, + context: p11070, freeVariables: Identifiers{ "std", }, @@ -121784,7 +123388,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8826, + context: p11079, freeVariables: Identifiers{ "str_", }, @@ -121814,7 +123418,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8813, + context: p11066, freeVariables: Identifiers{ "std", "str", @@ -121837,7 +123441,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8832, + context: p11085, freeVariables: Identifiers{ "std", }, @@ -121863,7 +123467,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "std", @@ -121968,7 +123572,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", }, @@ -121989,7 +123593,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\"", @@ -122016,7 +123620,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\\"", @@ -122037,7 +123641,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "std", @@ -122142,7 +123746,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, 
- context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", }, @@ -122163,7 +123767,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\", @@ -122190,7 +123794,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\\\", @@ -122211,7 +123815,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "std", @@ -122316,7 +123920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", }, @@ -122337,7 +123941,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\b", @@ -122364,7 +123968,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\b", @@ -122385,7 +123989,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "std", @@ -122490,7 +124094,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", }, @@ -122511,7 +124115,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\f", @@ -122538,7 +124142,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\f", @@ -122559,7 +124163,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "std", @@ -122664,7 +124268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", }, @@ -122685,7 +124289,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\n", @@ -122712,7 +124316,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\n", @@ -122733,7 +124337,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "std", @@ -122838,7 +124442,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", }, @@ -122859,7 +124463,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\r", @@ -122886,7 +124490,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\r", @@ -122907,7 +124511,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "std", @@ -123012,7 +124616,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", }, @@ -123033,7 +124637,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\t", @@ -123060,7 +124664,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\t", @@ -123081,7 +124685,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "std", @@ -123104,7 +124708,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p8938, + context: p11191, freeVariables: Identifiers{ "ch", "std", @@ -123124,7 +124728,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8938, + context: p11191, freeVariables: Identifiers{ "std", }, @@ -123143,7 +124747,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8938, + context: p11191, freeVariables: Identifiers{ "std", }, @@ -123189,7 +124793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8947, + context: p11200, freeVariables: Identifiers{ "ch", }, @@ -123219,7 +124823,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", "cp", @@ -123240,7 +124844,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123259,7 +124863,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123278,7 +124882,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123300,7 +124904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: float64(32), @@ -123322,7 +124926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123341,7 +124945,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123360,7 +124964,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123382,7 +124986,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: float64(126), @@ -123404,7 +125008,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123423,7 +125027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123445,7 +125049,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: float64(159), @@ -123553,7 +125157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: nil, }, Value: "\\u%04x", @@ -123574,7 +125178,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "cp", }, @@ -123594,7 +125198,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8983, + context: p11236, freeVariables: Identifiers{ "cp", }, @@ -123624,7 +125228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8836, + context: p11089, freeVariables: Identifiers{ "ch", }, @@ -123744,7 +125348,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8813, + context: p11066, freeVariables: nil, }, Value: "\"%s\"", @@ -123765,7 +125369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8813, + context: p11066, freeVariables: Identifiers{ "std", "str", @@ -123786,7 +125390,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8813, + context: p11066, freeVariables: Identifiers{ "std", }, @@ -123805,7 +125409,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p8813, + context: p11066, freeVariables: Identifiers{ "std", }, @@ -123851,7 +125455,7 @@ var StdAst = &DesugaredObject{ 
}, file: p1, }, - context: p9005, + context: p11258, freeVariables: nil, }, Value: "", @@ -124005,7 +125609,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9021, + context: p11274, freeVariables: Identifiers{ "ch", "trans", @@ -124025,7 +125629,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9021, + context: p11274, freeVariables: Identifiers{ "trans", }, @@ -124048,7 +125652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9027, + context: p11280, freeVariables: Identifiers{ "ch", }, @@ -124079,7 +125683,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9005, + context: p11258, freeVariables: Identifiers{ "std", "str", @@ -124099,7 +125703,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9005, + context: p11258, freeVariables: Identifiers{ "std", }, @@ -124118,7 +125722,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9005, + context: p11258, freeVariables: Identifiers{ "std", }, @@ -124164,7 +125768,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9038, + context: p11291, freeVariables: Identifiers{ "str", }, @@ -124258,7 +125862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11297, freeVariables: nil, }, }, @@ -124280,7 +125884,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11299, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -124522,7 +126126,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11323, freeVariables: Identifiers{ "base64_table", "i", @@ -124546,7 +126150,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11327, freeVariables: Identifiers{ "base64_table", "i", @@ -124566,7 +126170,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11330, freeVariables: Identifiers{ "base64_table", }, @@ -124587,7 +126191,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11333, freeVariables: Identifiers{ "i", }, @@ -124610,7 +126214,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p11336, freeVariables: Identifiers{ "i", }, @@ -124639,7 +126243,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11339, freeVariables: Identifiers{ "std", }, @@ -124658,7 +126262,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11342, freeVariables: Identifiers{ "std", }, @@ -124677,7 +126281,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11345, freeVariables: Identifiers{ "std", }, @@ -124723,7 +126327,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11350, freeVariables: nil, }, Value: float64(0), @@ -124743,14 +126347,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11352, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -124806,7 +126410,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -124832,7 +126436,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9046, + context: p11358, freeVariables: Identifiers{ "std", "str", @@ -124852,7 +126456,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9046, + context: p11358, freeVariables: Identifiers{ "std", }, @@ -124871,7 +126475,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p9046, + context: p11358, freeVariables: Identifiers{ "std", }, @@ -124917,7 +126521,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9055, + context: p11367, freeVariables: Identifiers{ "str", }, @@ -124991,7 +126595,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11373, freeVariables: nil, }, }, @@ -125013,7 +126617,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11375, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -125255,7 +126859,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11399, freeVariables: Identifiers{ "base64_table", "i", @@ -125279,7 +126883,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11403, freeVariables: Identifiers{ "base64_table", "i", @@ -125299,7 +126903,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11406, freeVariables: Identifiers{ "base64_table", }, @@ -125320,7 +126924,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11409, freeVariables: Identifiers{ "i", }, @@ -125343,7 +126947,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p11412, freeVariables: Identifiers{ "i", }, @@ -125372,7 +126976,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11415, freeVariables: Identifiers{ "std", }, @@ -125391,7 +126995,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11418, freeVariables: Identifiers{ "std", }, @@ -125410,7 +127014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11421, freeVariables: Identifiers{ "std", }, @@ -125456,7 +127060,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11426, freeVariables: nil, }, Value: float64(0), @@ -125476,14 +127080,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11428, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -125539,7 +127143,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -125565,7 +127169,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9063, + context: p11434, freeVariables: Identifiers{ "std", "str_", @@ -125588,7 +127192,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9067, + context: p11438, freeVariables: Identifiers{ "std", "str_", @@ -125608,7 +127212,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9067, + context: p11438, freeVariables: Identifiers{ "std", }, @@ -125627,7 +127231,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9067, + context: p11438, freeVariables: Identifiers{ "std", }, @@ -125673,7 +127277,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9076, + context: p11447, freeVariables: Identifiers{ "str_", }, @@ -125703,7 +127307,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9063, + context: p11434, freeVariables: Identifiers{ "std", "str", @@ -125726,7 +127330,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9082, + context: p11453, freeVariables: Identifiers{ "std", }, @@ -125752,7 +127356,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9086, + context: p11457, freeVariables: Identifiers{ "ch", "std", 
@@ -125857,7 +127461,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9086, + context: p11457, freeVariables: Identifiers{ "ch", }, @@ -125878,7 +127482,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9086, + context: p11457, freeVariables: nil, }, Value: "'", @@ -125905,7 +127509,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9086, + context: p11457, freeVariables: nil, }, Value: "'\"'\"'", @@ -125926,7 +127530,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9086, + context: p11457, freeVariables: Identifiers{ "ch", }, @@ -126038,7 +127642,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9063, + context: p11434, freeVariables: nil, }, Value: "'%s'", @@ -126059,7 +127663,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9063, + context: p11434, freeVariables: Identifiers{ "std", "str", @@ -126080,7 +127684,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9063, + context: p11434, freeVariables: Identifiers{ "std", }, @@ -126099,7 +127703,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9063, + context: p11434, freeVariables: Identifiers{ "std", }, @@ -126145,7 +127749,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9120, + context: p11491, freeVariables: nil, }, Value: "", @@ -126299,7 +127903,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9136, + context: p11507, freeVariables: Identifiers{ "ch", "trans", @@ -126319,7 +127923,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9136, + context: p11507, freeVariables: Identifiers{ "trans", }, @@ -126342,7 +127946,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9142, + context: p11513, freeVariables: Identifiers{ "ch", }, @@ -126373,7 +127977,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9120, + context: p11491, freeVariables: Identifiers{ "std", "str", @@ -126393,7 +127997,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9120, + context: p11491, freeVariables: Identifiers{ "std", }, @@ -126412,7 +128016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9120, + context: p11491, freeVariables: Identifiers{ "std", }, @@ -126458,7 +128062,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9153, + context: p11524, freeVariables: Identifiers{ "str", }, @@ -126552,7 +128156,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11530, freeVariables: nil, }, }, @@ -126574,7 +128178,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11532, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -126816,7 +128420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11556, freeVariables: Identifiers{ "base64_table", "i", @@ -126840,7 +128444,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11560, freeVariables: Identifiers{ "base64_table", "i", @@ -126860,7 +128464,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11563, freeVariables: Identifiers{ "base64_table", }, @@ -126881,7 +128485,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11566, freeVariables: Identifiers{ "i", }, @@ -126904,7 +128508,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p11569, freeVariables: Identifiers{ "i", }, @@ -126933,7 +128537,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, 
+ context: p11572, freeVariables: Identifiers{ "std", }, @@ -126952,7 +128556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11575, freeVariables: Identifiers{ "std", }, @@ -126971,7 +128575,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11578, freeVariables: Identifiers{ "std", }, @@ -127017,7 +128621,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11583, freeVariables: nil, }, Value: float64(0), @@ -127037,14 +128641,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11585, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -127100,7 +128704,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -127126,7 +128730,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9161, + context: p11591, freeVariables: Identifiers{ "std", "str_", @@ -127149,7 +128753,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9165, + context: p11595, freeVariables: Identifiers{ "std", "str_", @@ -127169,7 +128773,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9165, + context: p11595, freeVariables: Identifiers{ "std", }, @@ -127188,7 +128792,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9165, + context: p11595, freeVariables: Identifiers{ "std", }, @@ -127234,7 +128838,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9174, + context: p11604, freeVariables: Identifiers{ "str_", }, @@ -127264,7 +128868,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9161, + context: p11591, freeVariables: Identifiers{ "std", "str", @@ -127287,7 +128891,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9180, + context: p11610, freeVariables: Identifiers{ "std", }, @@ -127313,7 +128917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9184, + context: p11614, freeVariables: Identifiers{ "ch", "std", @@ -127418,7 +129022,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9184, + context: p11614, freeVariables: Identifiers{ "ch", }, @@ -127439,7 +129043,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9184, + context: p11614, freeVariables: nil, }, Value: "$", @@ -127466,7 +129070,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9184, + context: p11614, freeVariables: nil, }, Value: "$$", @@ -127487,7 +129091,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9184, + context: p11614, freeVariables: Identifiers{ "ch", }, @@ -127513,7 +129117,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9161, + context: p11591, freeVariables: Identifiers{ "std", "str", @@ -127534,7 +129138,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9161, + context: p11591, freeVariables: Identifiers{ "std", }, @@ -127553,7 +129157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9161, + context: p11591, freeVariables: Identifiers{ "std", }, @@ -127599,7 +129203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9209, + context: p11639, freeVariables: Identifiers{ "trans", }, @@ -127626,7 +129230,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9213, + context: p11643, freeVariables: Identifiers{ "a", "b", @@ -127647,7 +129251,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9213, + context: p11643, 
freeVariables: Identifiers{ "a", }, @@ -127669,7 +129273,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9213, + context: p11643, freeVariables: Identifiers{ "b", "trans", @@ -127689,7 +129293,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9213, + context: p11643, freeVariables: Identifiers{ "trans", }, @@ -127712,7 +129316,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9223, + context: p11653, freeVariables: Identifiers{ "b", }, @@ -127741,7 +129345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9209, + context: p11639, freeVariables: Identifiers{ "std", "str", @@ -127761,7 +129365,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9209, + context: p11639, freeVariables: Identifiers{ "std", }, @@ -127780,7 +129384,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9209, + context: p11639, freeVariables: Identifiers{ "std", }, @@ -127826,7 +129430,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9234, + context: p11664, freeVariables: Identifiers{ "str", }, @@ -127853,7 +129457,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9209, + context: p11639, freeVariables: nil, }, Value: "", @@ -127929,7 +129533,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11671, freeVariables: nil, }, }, @@ -127951,7 +129555,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11673, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -128193,7 +129797,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11697, freeVariables: Identifiers{ "base64_table", "i", @@ -128217,7 +129821,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11701, freeVariables: Identifiers{ "base64_table", "i", @@ -128237,7 +129841,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11704, freeVariables: Identifiers{ "base64_table", }, @@ -128258,7 +129862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11707, freeVariables: Identifiers{ "i", }, @@ -128281,7 +129885,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p11710, freeVariables: Identifiers{ "i", }, @@ -128310,7 +129914,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11713, freeVariables: Identifiers{ "std", }, @@ -128329,7 +129933,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11716, freeVariables: Identifiers{ "std", }, @@ -128348,7 +129952,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11719, freeVariables: Identifiers{ "std", }, @@ -128394,7 +129998,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11724, freeVariables: nil, }, Value: float64(0), @@ -128414,14 +130018,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11726, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -128477,7 +130081,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -128503,7 +130107,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9243, + context: p11732, freeVariables: Identifiers{ "std", "value", @@ -128523,7 +130127,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9243, 
+ context: p11732, freeVariables: Identifiers{ "std", }, @@ -128542,7 +130146,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9243, + context: p11732, freeVariables: Identifiers{ "std", }, @@ -128588,7 +130192,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9252, + context: p11741, freeVariables: Identifiers{ "value", }, @@ -128609,7 +130213,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9252, + context: p11741, freeVariables: nil, }, Value: " ", @@ -128683,7 +130287,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11748, freeVariables: nil, }, }, @@ -128705,7 +130309,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11750, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -128947,7 +130551,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11774, freeVariables: Identifiers{ "base64_table", "i", @@ -128971,7 +130575,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11778, freeVariables: Identifiers{ "base64_table", "i", @@ -128991,7 +130595,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11781, freeVariables: Identifiers{ "base64_table", }, @@ -129012,7 +130616,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11784, freeVariables: Identifiers{ "i", }, @@ -129035,7 +130639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p11787, freeVariables: Identifiers{ "i", }, @@ -129064,7 +130668,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11790, freeVariables: Identifiers{ "std", }, @@ -129083,7 +130687,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11793, freeVariables: Identifiers{ "std", }, @@ -129102,7 +130706,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p11796, freeVariables: Identifiers{ "std", }, @@ -129148,7 +130752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11801, freeVariables: nil, }, Value: float64(0), @@ -129168,14 +130772,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p11803, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -129231,7 +130835,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -129258,7 +130862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9261, + context: p11809, freeVariables: Identifiers{ "indent", "std", @@ -129282,7 +130886,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9265, + context: p11813, freeVariables: Identifiers{ "aux", "indent", @@ -129312,7 +130916,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -129421,7 +131025,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "v", }, @@ -129442,7 +131046,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: true, @@ -129467,7 +131071,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "true", @@ -129488,7 +131092,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -129597,7 +131201,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "v", }, @@ -129618,7 +131222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: false, @@ -129643,7 +131247,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "false", @@ -129664,7 +131268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -129773,7 +131377,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "v", }, @@ -129794,7 +131398,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, }, @@ -129818,7 +131422,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "null", @@ -129839,7 +131443,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -129948,7 +131552,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", "v", @@ -129968,7 +131572,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -129987,7 +131591,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -130033,7 +131637,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9330, + context: p11878, freeVariables: Identifiers{ "v", }, @@ -130060,7 +131664,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "number", @@ -130087,7 +131691,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "v", }, @@ -130106,7 +131710,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "", @@ -130128,7 +131732,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "v", }, @@ -130150,7 +131754,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -130259,7 +131863,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", "v", @@ -130279,7 +131883,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -130298,7 +131902,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -130344,7 +131948,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9357, + context: p11905, freeVariables: Identifiers{ "v", }, @@ -130371,7 +131975,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "string", @@ -130398,7 +132002,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", "v", @@ -130418,7 +132022,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -130437,7 +132041,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -130483,7 +132087,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9369, + context: p11917, freeVariables: Identifiers{ "v", }, @@ -130510,7 +132114,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -130619,7 +132223,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", "v", @@ -130639,7 +132243,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -130658,7 +132262,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -130704,7 +132308,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9390, + context: p11938, freeVariables: Identifiers{ "v", }, @@ -130731,7 +132335,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "function", @@ -130758,7 +132362,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "path", }, @@ -130777,7 +132381,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "path", }, @@ -130796,7 +132400,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "Tried to manifest function at ", @@ -130818,7 +132422,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "path", }, @@ -130841,7 +132445,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -130950,7 +132554,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", "v", @@ -130970,7 +132574,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -130989,7 +132593,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -131035,7 +132639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9419, + context: p11967, freeVariables: Identifiers{ "v", }, @@ -131062,7 +132666,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "array", @@ -131089,7 +132693,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -131116,7 +132720,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9426, + context: p11974, freeVariables: Identifiers{ "std", "v", @@ -131136,7 +132740,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9426, + context: p11974, freeVariables: Identifiers{ "std", }, @@ -131155,7 +132759,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9426, + context: p11974, freeVariables: Identifiers{ "std", }, @@ -131201,7 +132805,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9435, + context: p11983, freeVariables: nil, 
}, Value: float64(0), @@ -131221,7 +132825,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9435, + context: p11983, freeVariables: Identifiers{ "std", "v", @@ -131241,7 +132845,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9435, + context: p11983, freeVariables: Identifiers{ "std", "v", @@ -131261,7 +132865,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9435, + context: p11983, freeVariables: Identifiers{ "std", }, @@ -131280,7 +132884,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9435, + context: p11983, freeVariables: Identifiers{ "std", }, @@ -131326,7 +132930,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9447, + context: p11995, freeVariables: Identifiers{ "v", }, @@ -131354,7 +132958,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9435, + context: p11983, freeVariables: nil, }, Value: float64(1), @@ -131384,7 +132988,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -131412,7 +133016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9454, + context: p12002, freeVariables: Identifiers{ "cindent", "indent", @@ -131432,7 +133036,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9454, + context: p12002, freeVariables: Identifiers{ "cindent", }, @@ -131454,7 +133058,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9454, + context: p12002, freeVariables: Identifiers{ "indent", }, @@ -131479,7 +133083,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -131507,7 +133111,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9464, + context: p12012, freeVariables: Identifiers{ "aux", "cindent", @@ -131532,7 +133136,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9464, + context: p12012, freeVariables: Identifiers{ "aux", "new_indent", @@ -131556,7 +133160,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9464, + context: p12012, freeVariables: nil, }, Elements: Nodes{ @@ -131574,7 +133178,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9471, + context: p12019, freeVariables: nil, }, Value: "[\n", @@ -131599,7 +133203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9464, + context: p12012, freeVariables: Identifiers{ "aux", "new_indent", @@ -131623,7 +133227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9464, + context: p12012, freeVariables: Identifiers{ "std", }, @@ -131642,7 +133246,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9464, + context: p12012, freeVariables: Identifiers{ "std", }, @@ -131688,7 +133292,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9481, + context: p12029, freeVariables: nil, }, Elements: Nodes{ @@ -131706,7 +133310,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9484, + context: p12032, freeVariables: nil, }, Value: ",\n", @@ -131872,7 +133476,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9500, + context: p12048, freeVariables: Identifiers{ "aux", "i", @@ -131896,7 +133500,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9504, + context: p12052, freeVariables: Identifiers{ "aux", "i", @@ -131919,7 +133523,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9504, + context: p12052, freeVariables: Identifiers{ "new_indent", }, @@ -131941,7 +133545,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p9504, + context: p12052, freeVariables: Identifiers{ "aux", "i", @@ -131964,7 +133568,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9504, + context: p12052, freeVariables: Identifiers{ "aux", }, @@ -131987,7 +133591,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9514, + context: p12062, freeVariables: Identifiers{ "i", "v", @@ -132007,7 +133611,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9514, + context: p12062, freeVariables: Identifiers{ "v", }, @@ -132028,7 +133632,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9514, + context: p12062, freeVariables: Identifiers{ "i", }, @@ -132051,7 +133655,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9514, + context: p12062, freeVariables: Identifiers{ "i", "path", @@ -132071,7 +133675,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9514, + context: p12062, freeVariables: Identifiers{ "path", }, @@ -132093,7 +133697,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9514, + context: p12062, freeVariables: Identifiers{ "i", }, @@ -132113,7 +133717,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9528, + context: p12076, freeVariables: Identifiers{ "i", }, @@ -132138,7 +133742,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9514, + context: p12062, freeVariables: Identifiers{ "new_indent", }, @@ -132173,7 +133777,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9481, + context: p12029, freeVariables: Identifiers{ "range", }, @@ -132208,7 +133812,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9464, + context: p12012, freeVariables: Identifiers{ "cindent", }, @@ -132228,7 +133832,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9538, + context: p12086, freeVariables: Identifiers{ "cindent", }, @@ -132247,7 +133851,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9538, + context: p12086, freeVariables: Identifiers{ "cindent", }, @@ -132266,7 +133870,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9538, + context: p12086, freeVariables: nil, }, Value: "\n", @@ -132288,7 +133892,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9538, + context: p12086, freeVariables: Identifiers{ "cindent", }, @@ -132311,7 +133915,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9538, + context: p12086, freeVariables: nil, }, Value: "]", @@ -132340,7 +133944,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "lines", "std", @@ -132360,7 +133964,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -132379,7 +133983,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -132425,7 +134029,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9555, + context: p12103, freeVariables: nil, }, Value: "", @@ -132446,7 +134050,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9555, + context: p12103, freeVariables: Identifiers{ "lines", }, @@ -132476,7 +134080,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -132585,7 +134189,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", "v", @@ 
-132605,7 +134209,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -132624,7 +134228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -132670,7 +134274,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9577, + context: p12125, freeVariables: Identifiers{ "v", }, @@ -132697,7 +134301,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: nil, }, Value: "object", @@ -132724,7 +134328,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "aux", "cindent", @@ -132751,7 +134355,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9584, + context: p12132, freeVariables: Identifiers{ "aux", "cindent", @@ -132775,7 +134379,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9584, + context: p12132, freeVariables: Identifiers{ "aux", "cindent", @@ -132799,7 +134403,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9584, + context: p12132, freeVariables: nil, }, Elements: Nodes{ @@ -132817,7 +134421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9591, + context: p12139, freeVariables: nil, }, Value: "{\n", @@ -132842,7 +134446,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9584, + context: p12132, freeVariables: Identifiers{ "aux", "cindent", @@ -132866,7 +134470,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9584, + context: p12132, freeVariables: Identifiers{ "std", }, @@ -132885,7 +134489,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9584, + context: p12132, freeVariables: Identifiers{ "std", }, @@ -132931,7 +134535,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9601, + context: p12149, freeVariables: nil, }, Elements: Nodes{ @@ -132949,7 +134553,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9604, + context: p12152, freeVariables: nil, }, Value: ",\n", @@ -133119,7 +134723,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9620, + context: p12168, freeVariables: Identifiers{ "aux", "cindent", @@ -133145,7 +134749,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "aux", "cindent", @@ -133170,7 +134774,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "cindent", "indent", @@ -133192,7 +134796,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "cindent", "indent", @@ -133214,7 +134818,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "cindent", "indent", @@ -133234,7 +134838,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "cindent", }, @@ -133256,7 +134860,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "indent", }, @@ -133279,7 +134883,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "k", "std", @@ -133299,7 +134903,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "std", }, @@ -133318,7 +134922,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "std", }, @@ -133364,7 +134968,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9645, + context: p12193, freeVariables: Identifiers{ "k", }, @@ -133393,7 +134997,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: nil, }, Value: ": ", @@ -133416,7 +135020,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "aux", "cindent", @@ -133440,7 +135044,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9624, + context: p12172, freeVariables: Identifiers{ "aux", }, @@ -133463,7 +135067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "k", "v", @@ -133483,7 +135087,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "v", }, @@ -133504,7 +135108,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "k", }, @@ -133527,7 +135131,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "k", "path", @@ -133547,7 +135151,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "path", }, @@ -133569,7 +135173,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "k", }, @@ -133589,7 +135193,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9668, + context: p12216, freeVariables: Identifiers{ "k", }, @@ -133614,7 +135218,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "cindent", "indent", @@ -133634,7 +135238,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "cindent", }, @@ -133656,7 +135260,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9654, + context: p12202, freeVariables: Identifiers{ "indent", }, @@ -133692,7 +135296,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9601, + context: p12149, freeVariables: Identifiers{ "std", "v", @@ -133712,7 +135316,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9601, + context: p12149, freeVariables: Identifiers{ "std", }, @@ -133731,7 +135335,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9601, + context: p12149, freeVariables: Identifiers{ "std", }, @@ -133777,7 +135381,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9685, + context: p12233, freeVariables: Identifiers{ "v", }, @@ -133818,7 +135422,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9584, + context: p12132, freeVariables: Identifiers{ "cindent", }, @@ -133838,7 +135442,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9691, + context: p12239, freeVariables: Identifiers{ "cindent", }, @@ -133857,7 +135461,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9691, + context: p12239, freeVariables: Identifiers{ "cindent", }, @@ -133876,7 +135480,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9691, + context: p12239, freeVariables: nil, }, Value: "\n", @@ -133898,7 +135502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9691, + context: p12239, freeVariables: Identifiers{ "cindent", }, @@ -133921,7 +135525,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p9691, + context: p12239, freeVariables: nil, }, Value: "}", @@ -133950,7 +135554,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "lines", "std", @@ -133970,7 +135574,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -133989,7 +135593,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9269, + context: p11817, freeVariables: Identifiers{ "std", }, @@ -134035,7 +135639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9708, + context: p12256, freeVariables: nil, }, Value: "", @@ -134056,7 +135660,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9708, + context: p12256, freeVariables: Identifiers{ "lines", }, @@ -134114,7 +135718,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9261, + context: p11809, freeVariables: Identifiers{ "aux", "value", @@ -134134,7 +135738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9261, + context: p11809, freeVariables: Identifiers{ "aux", }, @@ -134157,7 +135761,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9718, + context: p12266, freeVariables: Identifiers{ "value", }, @@ -134178,7 +135782,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9718, + context: p12266, freeVariables: nil, }, Elements: nil, @@ -134198,7 +135802,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9718, + context: p12266, freeVariables: nil, }, Value: "", @@ -134273,7 +135877,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12274, freeVariables: nil, }, }, @@ -134295,7 +135899,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12276, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -134537,7 +136141,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12300, freeVariables: Identifiers{ "base64_table", "i", @@ -134561,7 +136165,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12304, freeVariables: Identifiers{ "base64_table", "i", @@ -134581,7 +136185,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12307, freeVariables: Identifiers{ "base64_table", }, @@ -134602,7 +136206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12310, freeVariables: Identifiers{ "i", }, @@ -134625,7 +136229,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p12313, freeVariables: Identifiers{ "i", }, @@ -134654,7 +136258,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12316, freeVariables: Identifiers{ "std", }, @@ -134673,7 +136277,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12319, freeVariables: Identifiers{ "std", }, @@ -134692,7 +136296,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12322, freeVariables: Identifiers{ "std", }, @@ -134738,7 +136342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p12327, freeVariables: nil, }, Value: float64(0), @@ -134758,14 +136362,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p12329, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ 
-134821,7 +136425,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -134843,11 +136447,11 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(940), - Column: int(37), + Column: int(30), }, file: p1, }, - context: p9728, + context: p12335, freeVariables: Identifiers{ "std", "value", @@ -134866,11 +136470,11 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9732, + context: p12339, freeVariables: Identifiers{ "aux", "std", @@ -134879,7 +136483,6 @@ var StdAst = &DesugaredObject{ Parameters: Parameters{ Required: Identifiers{ "v", - "in_array", "in_object", "path", "cindent", @@ -134897,15 +136500,14 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -135011,7 +136613,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "v", }, @@ -135032,7 +136634,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: true, @@ -135057,7 +136659,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "true", @@ -135074,15 +136676,14 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -135188,7 +136789,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "v", }, @@ -135209,7 +136810,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: false, @@ -135234,7 +136835,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "false", @@ -135251,15 +136852,14 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -135365,7 +136965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "v", }, @@ -135386,7 +136986,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, }, @@ -135410,7 +137010,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "null", @@ -135427,15 +137027,14 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -135541,7 +137140,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", "v", @@ -135561,7 +137160,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -135580,7 +137179,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: 
p12343, freeVariables: Identifiers{ "std", }, @@ -135626,7 +137225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9797, + context: p12404, freeVariables: Identifiers{ "v", }, @@ -135653,7 +137252,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "number", @@ -135680,7 +137279,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "v", }, @@ -135699,7 +137298,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "", @@ -135721,7 +137320,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "v", }, @@ -135739,15 +137338,14 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -135853,7 +137451,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", "v", @@ -135873,7 +137471,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -135892,7 +137490,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -135938,7 +137536,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9824, + context: p12431, freeVariables: Identifiers{ "v", }, @@ -135965,7 +137563,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "string", @@ -135992,7 +137590,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "cindent", "std", @@ -136016,7 +137614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9831, + context: p12438, freeVariables: Identifiers{ "std", "v", @@ -136036,7 +137634,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9831, + context: p12438, freeVariables: Identifiers{ "std", }, @@ -136055,7 +137653,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9831, + context: p12438, freeVariables: Identifiers{ "std", }, @@ -136101,7 +137699,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9840, + context: p12447, freeVariables: Identifiers{ "v", }, @@ -136131,7 +137729,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "cindent", "len", @@ -136238,7 +137836,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "len", }, @@ -136259,7 +137857,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: float64(0), @@ -136285,7 +137883,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "\"\"", @@ -136306,7 +137904,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "cindent", "len", @@ -136414,7 +138012,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "len", "v", @@ -136434,7 +138032,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: 
p12343, freeVariables: Identifiers{ "v", }, @@ -136455,7 +138053,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "len", }, @@ -136474,7 +138072,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "len", }, @@ -136496,7 +138094,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: float64(1), @@ -136519,7 +138117,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "\n", @@ -136546,7 +138144,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "cindent", "std", @@ -136570,7 +138168,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9880, + context: p12487, freeVariables: Identifiers{ "std", "v", @@ -136590,7 +138188,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9880, + context: p12487, freeVariables: Identifiers{ "std", }, @@ -136609,7 +138207,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9880, + context: p12487, freeVariables: Identifiers{ "std", }, @@ -136655,7 +138253,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9889, + context: p12496, freeVariables: Identifiers{ "v", }, @@ -136676,7 +138274,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9889, + context: p12496, freeVariables: nil, }, Value: "\n", @@ -136706,7 +138304,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "cindent", "split", @@ -136727,7 +138325,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -136746,7 +138344,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -136792,7 +138390,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: Identifiers{ "cindent", }, @@ -136811,7 +138409,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: nil, }, Value: "\n", @@ -136833,7 +138431,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: Identifiers{ "cindent", }, @@ -136855,7 +138453,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: Identifiers{ "split", "std", @@ -136875,7 +138473,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: nil, }, Elements: Nodes{ @@ -136893,7 +138491,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9911, + context: p12518, freeVariables: nil, }, Value: "|", @@ -137003,7 +138601,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: Identifiers{ "split", }, @@ -137024,7 +138622,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: nil, }, Value: float64(0), @@ -137044,7 +138642,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: Identifiers{ "split", "std", @@ -137064,7 +138662,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: Identifiers{ "split", "std", @@ -137084,7 +138682,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p9901, + context: p12508, freeVariables: Identifiers{ "std", }, @@ -137103,7 +138701,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: Identifiers{ "std", }, @@ -137149,7 +138747,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9934, + context: p12541, freeVariables: Identifiers{ "split", }, @@ -137177,7 +138775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9901, + context: p12508, freeVariables: nil, }, Value: float64(1), @@ -137230,7 +138828,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", "v", @@ -137250,7 +138848,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -137269,7 +138867,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -137315,7 +138913,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9947, + context: p12554, freeVariables: Identifiers{ "v", }, @@ -137341,15 +138939,14 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -137455,7 +139052,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", "v", @@ -137475,7 +139072,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -137494,7 +139091,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -137540,7 +139137,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9968, + context: p12575, freeVariables: Identifiers{ "v", }, @@ -137567,7 +139164,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "function", @@ -137594,7 +139191,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "path", }, @@ -137613,7 +139210,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "path", }, @@ -137632,7 +139229,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "Tried to manifest function at ", @@ -137654,7 +139251,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "path", }, @@ -137673,15 +139270,14 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -137787,7 +139383,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", "v", @@ -137807,7 +139403,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -137826,7 +139422,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -137872,7 +139468,7 @@ var StdAst = &DesugaredObject{ }, file: p1, 
}, - context: p9997, + context: p12604, freeVariables: Identifiers{ "v", }, @@ -137899,7 +139495,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "array", @@ -137921,12 +139517,12 @@ var StdAst = &DesugaredObject{ Column: int(9), }, End: Location{ - Line: int(928), - Column: int(101), + Line: int(929), + Column: int(64), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", @@ -138035,7 +139631,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", "v", @@ -138055,7 +139651,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -138074,7 +139670,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -138120,7 +139716,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10019, + context: p12626, freeVariables: Identifiers{ "v", }, @@ -138147,7 +139743,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: float64(0), @@ -138173,7 +139769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "[]", @@ -138189,12 +139785,12 @@ var StdAst = &DesugaredObject{ Column: int(11), }, End: Location{ - Line: int(928), - Column: int(101), + Line: int(929), + Column: int(64), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", @@ -138221,7 +139817,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10027, + context: p12634, freeVariables: Identifiers{ "std", "v", @@ -138241,7 +139837,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10027, + context: p12634, freeVariables: Identifiers{ "std", }, @@ -138260,7 +139856,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10027, + context: p12634, freeVariables: Identifiers{ "std", }, @@ -138306,7 +139902,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10036, + context: p12643, freeVariables: nil, }, Value: float64(0), @@ -138326,7 +139922,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10036, + context: p12643, freeVariables: Identifiers{ "std", "v", @@ -138346,7 +139942,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10036, + context: p12643, freeVariables: Identifiers{ "std", "v", @@ -138366,7 +139962,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10036, + context: p12643, freeVariables: Identifiers{ "std", }, @@ -138385,7 +139981,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10036, + context: p12643, freeVariables: Identifiers{ "std", }, @@ -138431,7 +140027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10048, + context: p12655, freeVariables: Identifiers{ "v", }, @@ -138459,7 +140055,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10036, + context: p12643, freeVariables: nil, }, Value: float64(1), @@ -138484,12 +140080,12 @@ var StdAst = &DesugaredObject{ Column: int(11), }, End: Location{ - Line: int(928), - Column: int(101), + Line: int(929), + Column: int(64), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", @@ -138502,103 +140098,50 @@ var StdAst = &DesugaredObject{ }, Binds: LocalBinds{ LocalBind{ - Variable: "new_indent", - Body: 
&Binary{ + Variable: "actual_indent", + Body: &Conditional{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ Line: int(926), - Column: int(30), + Column: int(33), }, End: Location{ Line: int(926), - Column: int(44), + Column: int(75), }, file: p1, }, - context: p10055, + context: p12662, freeVariables: Identifiers{ "cindent", + "in_object", + "std", }, }, - Left: &Var{ + Cond: &Var{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ Line: int(926), - Column: int(30), + Column: int(36), }, End: Location{ Line: int(926), - Column: int(37), + Column: int(45), }, file: p1, }, - context: p10055, + context: p12662, freeVariables: Identifiers{ - "cindent", - }, - }, - Id: "cindent", - }, - Op: BinaryOp(3), - Right: &LiteralString{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(926), - Column: int(40), - }, - End: Location{ - Line: int(926), - Column: int(44), - }, - file: p1, + "in_object", }, - context: p10055, - freeVariables: nil, }, - Value: " ", - Kind: LiteralStringKind(1), - BlockIndent: "", + Id: "in_object", }, - }, - Fun: nil, - }, - }, - Body: &Local{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(927), - Column: int(11), - }, - End: Location{ - Line: int(928), - Column: int(101), - }, - file: p1, - }, - context: p9736, - freeVariables: Identifiers{ - "aux", - "cindent", - "in_object", - "new_indent", - "path", - "range", - "std", - "v", - }, - }, - Binds: LocalBinds{ - LocalBind{ - Variable: "parts", - Body: &Apply{ + BranchTrue: &Apply{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", @@ -138614,12 +140157,234 @@ var StdAst = &DesugaredObject{ }, context: nil, freeVariables: Identifiers{ - "aux", - "new_indent", - "path", - "range", + "cindent", "std", - "v", + }, + }, + Target: &Index{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(0), + Column: int(0), + }, + End: Location{ + Line: int(0), + Column: int(0), + }, + file: nil, + }, + context: nil, + freeVariables: Identifiers{ + "std", + }, + }, + Target: &Var{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(0), + Column: int(0), + }, + End: Location{ + Line: int(0), + Column: int(0), + }, + file: nil, + }, + context: nil, + freeVariables: Identifiers{ + "std", + }, + }, + Id: "std", + }, + Index: &LiteralString{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(0), + Column: int(0), + }, + End: Location{ + Line: int(0), + Column: int(0), + }, + file: nil, + }, + context: nil, + freeVariables: nil, + }, + Value: "slice", + Kind: LiteralStringKind(1), + BlockIndent: "", + }, + Id: nil, + }, + Arguments: Arguments{ + Positional: Nodes{ + &Var{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(926), + Column: int(51), + }, + End: Location{ + Line: int(926), + Column: int(58), + }, + file: p1, + }, + context: p12662, + freeVariables: Identifiers{ + "cindent", + }, + }, + Id: "cindent", + }, + &LiteralNumber{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(926), + Column: int(59), + }, + End: Location{ + Line: int(926), + Column: int(60), + }, + file: p1, + }, + context: p12662, + freeVariables: nil, + }, + Value: float64(2), + OriginalString: "2", + }, + &LiteralNull{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(0), + Column: int(0), + }, + End: 
Location{ + Line: int(0), + Column: int(0), + }, + file: nil, + }, + context: nil, + freeVariables: nil, + }, + }, + &LiteralNull{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(0), + Column: int(0), + }, + End: Location{ + Line: int(0), + Column: int(0), + }, + file: nil, + }, + context: nil, + freeVariables: nil, + }, + }, + }, + Named: nil, + }, + TrailingComma: false, + TailStrict: false, + }, + BranchFalse: &Var{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(926), + Column: int(68), + }, + End: Location{ + Line: int(926), + Column: int(75), + }, + file: p1, + }, + context: p12662, + freeVariables: Identifiers{ + "cindent", + }, + }, + Id: "cindent", + }, + }, + Fun: nil, + }, + }, + Body: &Local{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(927), + Column: int(11), + }, + End: Location{ + Line: int(929), + Column: int(64), + }, + file: p1, + }, + context: p12343, + freeVariables: Identifiers{ + "actual_indent", + "aux", + "cindent", + "in_object", + "path", + "range", + "std", + "v", + }, + }, + Binds: LocalBinds{ + LocalBind{ + Variable: "parts", + Body: &Apply{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(0), + Column: int(0), + }, + End: Location{ + Line: int(0), + Column: int(0), + }, + file: nil, + }, + context: nil, + freeVariables: Identifiers{ + "aux", + "cindent", + "path", + "range", + "std", + "v", }, }, Target: &Index{ @@ -138704,7 +140469,7 @@ var StdAst = &DesugaredObject{ context: nil, freeVariables: Identifiers{ "aux", - "new_indent", + "cindent", "path", "v", }, @@ -138733,8 +140498,8 @@ var StdAst = &DesugaredObject{ context: nil, freeVariables: Identifiers{ "aux", + "cindent", "i", - "new_indent", "path", "v", }, @@ -138750,15 +140515,15 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(927), - Column: int(72), + Column: int(63), }, file: p1, }, - context: p10078, + context: p12699, freeVariables: Identifiers{ "aux", + "cindent", "i", - "new_indent", "path", "v", }, @@ -138777,7 +140542,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10078, + context: p12699, freeVariables: Identifiers{ "aux", }, @@ -138800,7 +140565,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10084, + context: p12705, freeVariables: Identifiers{ "i", "v", @@ -138820,7 +140585,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10084, + context: p12705, freeVariables: Identifiers{ "v", }, @@ -138841,7 +140606,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10084, + context: p12705, freeVariables: Identifiers{ "i", }, @@ -138860,30 +140625,11 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(927), - Column: int(40), - }, - file: p1, - }, - context: p10084, - freeVariables: nil, - }, - Value: true, - }, - &LiteralBoolean{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(927), - Column: int(42), - }, - End: Location{ - Line: int(927), - Column: int(47), + Column: int(41), }, file: p1, }, - context: p10084, + context: p12705, freeVariables: nil, }, Value: false, @@ -138894,15 +140640,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(927), - Column: int(49), + Column: int(43), }, End: Location{ Line: int(927), - Column: int(59), + Column: int(53), }, file: p1, }, - context: p10084, + context: p12705, freeVariables: Identifiers{ "i", "path", @@ -138914,15 +140660,15 @@ var 
StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(927), - Column: int(49), + Column: int(43), }, End: Location{ Line: int(927), - Column: int(53), + Column: int(47), }, file: p1, }, - context: p10084, + context: p12705, freeVariables: Identifiers{ "path", }, @@ -138936,15 +140682,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(927), - Column: int(56), + Column: int(50), }, End: Location{ Line: int(927), - Column: int(59), + Column: int(53), }, file: p1, }, - context: p10084, + context: p12705, freeVariables: Identifiers{ "i", }, @@ -138956,15 +140702,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(927), - Column: int(57), + Column: int(51), }, End: Location{ Line: int(927), - Column: int(58), + Column: int(52), }, file: p1, }, - context: p10100, + context: p12720, freeVariables: Identifiers{ "i", }, @@ -138981,20 +140727,20 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(927), - Column: int(61), + Column: int(55), }, End: Location{ Line: int(927), - Column: int(71), + Column: int(62), }, file: p1, }, - context: p10084, + context: p12705, freeVariables: Identifiers{ - "new_indent", + "cindent", }, }, - Id: "new_indent", + Id: "cindent", }, }, Named: nil, @@ -139012,15 +140758,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(927), - Column: int(82), + Column: int(73), }, End: Location{ Line: int(927), - Column: int(87), + Column: int(78), }, file: p1, }, - context: p10105, + context: p12725, freeVariables: Identifiers{ "range", }, @@ -139045,14 +140791,14 @@ var StdAst = &DesugaredObject{ Column: int(11), }, End: Location{ - Line: int(928), - Column: int(101), + Line: int(929), + Column: int(64), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "cindent", + "actual_indent", "in_object", "parts", "std", @@ -139067,14 +140813,14 @@ var StdAst = &DesugaredObject{ Column: int(11), }, End: Location{ - Line: int(928), - Column: int(60), + Line: int(929), + Column: int(17), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "cindent", + "actual_indent", "in_object", }, }, @@ -139088,13 +140834,13 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(928), - Column: int(52), + Column: int(58), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "cindent", + "actual_indent", "in_object", }, }, @@ -139112,7 +140858,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "in_object", }, @@ -139129,13 +140875,13 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(928), - Column: int(44), + Column: int(50), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "cindent", + "actual_indent", }, }, Left: &LiteralString{ @@ -139152,7 +140898,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "\n", @@ -139170,16 +140916,16 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(928), - Column: int(44), + Column: int(50), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "cindent", + "actual_indent", }, }, - Id: "cindent", + Id: "actual_indent", }, }, BranchFalse: &LiteralString{ @@ -139188,15 +140934,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(928), - Column: int(50), + Column: int(56), }, End: Location{ Line: int(928), - Column: int(52), 
+ Column: int(58), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "", @@ -139210,16 +140956,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(56), + Line: int(929), + Column: int(13), }, End: Location{ - Line: int(928), - Column: int(60), + Line: int(929), + Column: int(17), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "- ", @@ -139233,18 +140979,18 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(63), + Line: int(929), + Column: int(20), }, End: Location{ - Line: int(928), - Column: int(101), + Line: int(929), + Column: int(64), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "cindent", + "actual_indent", "parts", "std", }, @@ -139254,16 +141000,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(63), + Line: int(929), + Column: int(20), }, End: Location{ - Line: int(928), - Column: int(71), + Line: int(929), + Column: int(28), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -139273,16 +141019,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(63), + Line: int(929), + Column: int(20), }, End: Location{ - Line: int(928), - Column: int(66), + Line: int(929), + Column: int(23), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -139319,18 +141065,18 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(72), + Line: int(929), + Column: int(29), }, End: Location{ - Line: int(928), - Column: int(93), + Line: int(929), + Column: int(56), }, file: p1, }, - context: p10131, + context: p12751, freeVariables: Identifiers{ - "cindent", + "actual_indent", }, }, Left: &Binary{ @@ -139338,18 +141084,18 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(72), + Line: int(929), + Column: int(29), }, End: Location{ - Line: int(928), - Column: int(86), + Line: int(929), + Column: int(49), }, file: p1, }, - context: p10131, + context: p12751, freeVariables: Identifiers{ - "cindent", + "actual_indent", }, }, Left: &LiteralString{ @@ -139357,16 +141103,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(72), + Line: int(929), + Column: int(29), }, End: Location{ - Line: int(928), - Column: int(76), + Line: int(929), + Column: int(33), }, file: p1, }, - context: p10131, + context: p12751, freeVariables: nil, }, Value: "\n", @@ -139379,21 +141125,21 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(79), + Line: int(929), + Column: int(36), }, End: Location{ - Line: int(928), - Column: int(86), + Line: int(929), + Column: int(49), }, file: p1, }, - context: p10131, + context: p12751, freeVariables: Identifiers{ - "cindent", + "actual_indent", }, }, - Id: "cindent", + Id: "actual_indent", }, }, Op: BinaryOp(3), @@ -139402,16 +141148,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(89), + Line: int(929), + Column: int(52), }, End: Location{ - Line: int(928), - Column: int(93), + Line: int(929), + Column: int(56), }, file: 
p1, }, - context: p10131, + context: p12751, freeVariables: nil, }, Value: "- ", @@ -139424,16 +141170,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(928), - Column: int(95), + Line: int(929), + Column: int(58), }, End: Location{ - Line: int(928), - Column: int(100), + Line: int(929), + Column: int(63), }, file: p1, }, - context: p10131, + context: p12751, freeVariables: Identifiers{ "parts", }, @@ -139456,20 +141202,19 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(929), + Line: int(930), Column: int(12), }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -139566,16 +141311,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(929), + Line: int(930), Column: int(15), }, End: Location{ - Line: int(929), + Line: int(930), Column: int(26), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", "v", @@ -139586,16 +141331,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(929), + Line: int(930), Column: int(15), }, End: Location{ - Line: int(929), + Line: int(930), Column: int(23), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -139605,16 +141350,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(929), + Line: int(930), Column: int(15), }, End: Location{ - Line: int(929), + Line: int(930), Column: int(18), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -139651,16 +141396,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(929), + Line: int(930), Column: int(24), }, End: Location{ - Line: int(929), + Line: int(930), Column: int(25), }, file: p1, }, - context: p10160, + context: p12780, freeVariables: Identifiers{ "v", }, @@ -139678,16 +141423,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(929), + Line: int(930), Column: int(30), }, End: Location{ - Line: int(929), + Line: int(930), Column: int(38), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "object", @@ -139705,20 +141450,19 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(930), + Line: int(931), Column: int(9), }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -139815,16 +141559,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(930), + Line: int(931), Column: int(12), }, End: Location{ - Line: int(930), + Line: int(931), Column: int(25), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", "v", @@ -139835,16 +141579,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(930), + Line: int(931), Column: int(12), }, End: Location{ - Line: int(930), + Line: int(931), Column: int(22), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -139854,16 +141598,16 @@ var StdAst = &DesugaredObject{ loc: 
LocationRange{ FileName: "", Begin: Location{ - Line: int(930), + Line: int(931), Column: int(12), }, End: Location{ - Line: int(930), + Line: int(931), Column: int(15), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -139900,16 +141644,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(930), + Line: int(931), Column: int(23), }, End: Location{ - Line: int(930), + Line: int(931), Column: int(24), }, file: p1, }, - context: p10182, + context: p12802, freeVariables: Identifiers{ "v", }, @@ -139927,16 +141671,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(930), + Line: int(931), Column: int(29), }, End: Location{ - Line: int(930), + Line: int(931), Column: int(30), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: float64(0), @@ -139953,16 +141697,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(931), + Line: int(932), Column: int(11), }, End: Location{ - Line: int(931), + Line: int(932), Column: int(15), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "{}", @@ -139974,20 +141718,19 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(933), + Line: int(934), Column: int(11), }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "path", "std", @@ -140002,16 +141745,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(933), + Line: int(934), Column: int(30), }, End: Location{ - Line: int(933), + Line: int(934), Column: int(44), }, file: p1, }, - context: p10190, + context: p12810, freeVariables: Identifiers{ "cindent", }, @@ -140021,16 +141764,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(933), + Line: int(934), Column: int(30), }, End: Location{ - Line: int(933), + Line: int(934), Column: int(37), }, file: p1, }, - context: p10190, + context: p12810, freeVariables: Identifiers{ "cindent", }, @@ -140043,16 +141786,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(933), + Line: int(934), Column: int(40), }, End: Location{ - Line: int(933), + Line: int(934), Column: int(44), }, file: p1, }, - context: p10190, + context: p12810, freeVariables: nil, }, Value: " ", @@ -140068,20 +141811,19 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(934), + Line: int(935), Column: int(11), }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "aux", "cindent", - "in_array", "in_object", "new_indent", "path", @@ -140109,7 +141851,6 @@ var StdAst = &DesugaredObject{ context: nil, freeVariables: Identifiers{ "aux", - "cindent", "new_indent", "path", "std", @@ -140198,7 +141939,6 @@ var StdAst = &DesugaredObject{ context: nil, freeVariables: Identifiers{ "aux", - "cindent", "new_indent", "path", "std", @@ -140229,7 +141969,6 @@ var StdAst = &DesugaredObject{ context: nil, freeVariables: Identifiers{ "aux", - "cindent", "k", "new_indent", "path", @@ -140243,19 +141982,18 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), + 
Line: int(936), Column: int(13), }, End: Location{ - Line: int(935), - Column: int(102), + Line: int(936), + Column: int(85), }, file: p1, }, - context: p10213, + context: p12833, freeVariables: Identifiers{ "aux", - "cindent", "k", "new_indent", "path", @@ -140268,177 +142006,132 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), + Line: int(936), Column: int(13), }, End: Location{ - Line: int(935), - Column: int(53), + Line: int(936), + Column: int(43), }, file: p1, }, - context: p10213, + context: p12833, freeVariables: Identifiers{ - "cindent", "k", "std", }, }, - Left: &Binary{ + Left: &Apply{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), + Line: int(936), Column: int(13), }, End: Location{ - Line: int(935), - Column: int(46), + Line: int(936), + Column: int(36), }, file: p1, }, - context: p10213, + context: p12833, freeVariables: Identifiers{ - "cindent", "k", "std", }, }, - Left: &Var{ + Target: &Index{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), + Line: int(936), Column: int(13), }, End: Location{ - Line: int(935), - Column: int(20), - }, - file: p1, - }, - context: p10213, - freeVariables: Identifiers{ - "cindent", - }, - }, - Id: "cindent", - }, - Op: BinaryOp(3), - Right: &Apply{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(935), - Column: int(23), - }, - End: Location{ - Line: int(935), - Column: int(46), + Line: int(936), + Column: int(33), }, file: p1, }, - context: p10213, + context: p12833, freeVariables: Identifiers{ - "k", "std", }, }, - Target: &Index{ + Target: &Var{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(23), + Line: int(936), + Column: int(13), }, End: Location{ - Line: int(935), - Column: int(43), + Line: int(936), + Column: int(16), }, file: p1, }, - context: p10213, + context: p12833, freeVariables: Identifiers{ "std", }, }, - Target: &Var{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(935), - Column: int(23), - }, - End: Location{ - Line: int(935), - Column: int(26), - }, - file: p1, + Id: "std", + }, + Index: &LiteralString{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(0), + Column: int(0), }, - context: p10213, - freeVariables: Identifiers{ - "std", + End: Location{ + Line: int(0), + Column: int(0), }, + file: nil, }, - Id: "std", + context: nil, + freeVariables: nil, }, - Index: &LiteralString{ + Value: "escapeStringJson", + Kind: LiteralStringKind(1), + BlockIndent: "", + }, + Id: nil, + }, + Arguments: Arguments{ + Positional: Nodes{ + &Var{ NodeBase: NodeBase{ loc: LocationRange{ - FileName: "", + FileName: "", Begin: Location{ - Line: int(0), - Column: int(0), + Line: int(936), + Column: int(34), }, End: Location{ - Line: int(0), - Column: int(0), + Line: int(936), + Column: int(35), }, - file: nil, + file: p1, }, - context: nil, - freeVariables: nil, - }, - Value: "escapeStringJson", - Kind: LiteralStringKind(1), - BlockIndent: "", - }, - Id: nil, - }, - Arguments: Arguments{ - Positional: Nodes{ - &Var{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(935), - Column: int(44), - }, - End: Location{ - Line: int(935), - Column: int(45), - }, - file: p1, - }, - context: p10230, - freeVariables: Identifiers{ - "k", - }, + context: p12846, + freeVariables: Identifiers{ + "k", }, - Id: 
"k", }, + Id: "k", }, - Named: nil, }, - TrailingComma: false, - TailStrict: false, + Named: nil, }, + TrailingComma: false, + TailStrict: false, }, Op: BinaryOp(3), Right: &LiteralString{ @@ -140446,16 +142139,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(49), + Line: int(936), + Column: int(39), }, End: Location{ - Line: int(935), - Column: int(53), + Line: int(936), + Column: int(43), }, file: p1, }, - context: p10213, + context: p12833, freeVariables: nil, }, Value: ": ", @@ -140469,16 +142162,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(56), + Line: int(936), + Column: int(46), }, End: Location{ - Line: int(935), - Column: int(102), + Line: int(936), + Column: int(85), }, file: p1, }, - context: p10213, + context: p12833, freeVariables: Identifiers{ "aux", "k", @@ -140492,16 +142185,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(56), + Line: int(936), + Column: int(46), }, End: Location{ - Line: int(935), - Column: int(59), + Line: int(936), + Column: int(49), }, file: p1, }, - context: p10213, + context: p12833, freeVariables: Identifiers{ "aux", }, @@ -140515,16 +142208,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(60), + Line: int(936), + Column: int(50), }, End: Location{ - Line: int(935), - Column: int(64), + Line: int(936), + Column: int(54), }, file: p1, }, - context: p10239, + context: p12855, freeVariables: Identifiers{ "k", "v", @@ -140535,16 +142228,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(60), + Line: int(936), + Column: int(50), }, End: Location{ - Line: int(935), - Column: int(61), + Line: int(936), + Column: int(51), }, file: p1, }, - context: p10239, + context: p12855, freeVariables: Identifiers{ "v", }, @@ -140556,16 +142249,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(62), + Line: int(936), + Column: int(52), }, End: Location{ - Line: int(935), - Column: int(63), + Line: int(936), + Column: int(53), }, file: p1, }, - context: p10239, + context: p12855, freeVariables: Identifiers{ "k", }, @@ -140579,35 +142272,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(66), - }, - End: Location{ - Line: int(935), - Column: int(71), - }, - file: p1, - }, - context: p10239, - freeVariables: nil, - }, - Value: false, - }, - &LiteralBoolean{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(935), - Column: int(73), + Line: int(936), + Column: int(56), }, End: Location{ - Line: int(935), - Column: int(77), + Line: int(936), + Column: int(60), }, file: p1, }, - context: p10239, + context: p12855, freeVariables: nil, }, Value: true, @@ -140617,16 +142291,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(79), + Line: int(936), + Column: int(62), }, End: Location{ - Line: int(935), - Column: int(89), + Line: int(936), + Column: int(72), }, file: p1, }, - context: p10239, + context: p12855, freeVariables: Identifiers{ "k", "path", @@ -140637,16 +142311,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: 
int(79), + Line: int(936), + Column: int(62), }, End: Location{ - Line: int(935), - Column: int(83), + Line: int(936), + Column: int(66), }, file: p1, }, - context: p10239, + context: p12855, freeVariables: Identifiers{ "path", }, @@ -140659,16 +142333,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(86), + Line: int(936), + Column: int(69), }, End: Location{ - Line: int(935), - Column: int(89), + Line: int(936), + Column: int(72), }, file: p1, }, - context: p10239, + context: p12855, freeVariables: Identifiers{ "k", }, @@ -140679,16 +142353,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(87), + Line: int(936), + Column: int(70), }, End: Location{ - Line: int(935), - Column: int(88), + Line: int(936), + Column: int(71), }, file: p1, }, - context: p10255, + context: p12870, freeVariables: Identifiers{ "k", }, @@ -140704,16 +142378,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(935), - Column: int(91), + Line: int(936), + Column: int(74), }, End: Location{ - Line: int(935), - Column: int(101), + Line: int(936), + Column: int(84), }, file: p1, }, - context: p10239, + context: p12855, freeVariables: Identifiers{ "new_indent", }, @@ -140736,16 +142410,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(936), + Line: int(937), Column: int(22), }, End: Location{ - Line: int(936), + Line: int(937), Column: int(41), }, file: p1, }, - context: p10260, + context: p12875, freeVariables: Identifiers{ "std", "v", @@ -140756,16 +142430,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(936), + Line: int(937), Column: int(22), }, End: Location{ - Line: int(936), + Line: int(937), Column: int(38), }, file: p1, }, - context: p10260, + context: p12875, freeVariables: Identifiers{ "std", }, @@ -140775,16 +142449,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(936), + Line: int(937), Column: int(22), }, End: Location{ - Line: int(936), + Line: int(937), Column: int(25), }, file: p1, }, - context: p10260, + context: p12875, freeVariables: Identifiers{ "std", }, @@ -140821,16 +142495,16 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(936), + Line: int(937), Column: int(39), }, End: Location{ - Line: int(936), + Line: int(937), Column: int(40), }, file: p1, }, - context: p10269, + context: p12884, freeVariables: Identifiers{ "v", }, @@ -140857,18 +142531,18 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(938), + Line: int(939), Column: int(11), }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "in_array", + "cindent", "in_object", "lines", "std", @@ -140879,121 +142553,120 @@ var StdAst = &DesugaredObject{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(938), + Line: int(939), Column: int(12), }, End: Location{ - Line: int(938), - Column: int(54), + Line: int(939), + Column: int(52), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "in_array", + "cindent", "in_object", }, }, - Cond: &Binary{ + Cond: &Var{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(938), + Line: int(939), Column: int(15), 
}, End: Location{ - Line: int(938), - Column: int(36), + Line: int(939), + Column: int(24), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "in_array", "in_object", }, }, - Left: &Var{ + Id: "in_object", + }, + BranchTrue: &Binary{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(939), + Column: int(30), + }, + End: Location{ + Line: int(939), + Column: int(44), + }, + file: p1, + }, + context: p12343, + freeVariables: Identifiers{ + "cindent", + }, + }, + Left: &LiteralString{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(938), - Column: int(15), + Line: int(939), + Column: int(30), }, End: Location{ - Line: int(938), - Column: int(23), + Line: int(939), + Column: int(34), }, file: p1, }, - context: p9736, - freeVariables: Identifiers{ - "in_array", - }, + context: p12343, + freeVariables: nil, }, - Id: "in_array", + Value: "\n", + Kind: LiteralStringKind(1), + BlockIndent: "", }, - Op: BinaryOp(18), + Op: BinaryOp(3), Right: &Var{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(938), - Column: int(27), + Line: int(939), + Column: int(37), }, End: Location{ - Line: int(938), - Column: int(36), + Line: int(939), + Column: int(44), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ - "in_object", - }, - }, - Id: "in_object", - }, - }, - BranchTrue: &LiteralString{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(938), - Column: int(42), - }, - End: Location{ - Line: int(938), - Column: int(46), + "cindent", }, - file: p1, }, - context: p9736, - freeVariables: nil, + Id: "cindent", }, - Value: "\n", - Kind: LiteralStringKind(1), - BlockIndent: "", }, BranchFalse: &LiteralString{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ - Line: int(938), - Column: int(52), + Line: int(939), + Column: int(50), }, End: Location{ - Line: int(938), - Column: int(54), + Line: int(939), + Column: int(52), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: nil, }, Value: "", @@ -141008,16 +142681,17 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(939), - Column: int(13), + Column: int(56), }, End: Location{ Line: int(939), - Column: int(34), + Column: int(87), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ + "cindent", "lines", "std", }, @@ -141028,15 +142702,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(939), - Column: int(13), + Column: int(56), }, End: Location{ Line: int(939), - Column: int(21), + Column: int(64), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -141047,15 +142721,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(939), - Column: int(13), + Column: int(56), }, End: Location{ Line: int(939), - Column: int(16), + Column: int(59), }, file: p1, }, - context: p9736, + context: p12343, freeVariables: Identifiers{ "std", }, @@ -141087,26 +142761,68 @@ var StdAst = &DesugaredObject{ }, Arguments: Arguments{ Positional: Nodes{ - &LiteralString{ + &Binary{ NodeBase: NodeBase{ loc: LocationRange{ FileName: "", Begin: Location{ Line: int(939), - Column: int(22), + Column: int(65), }, End: Location{ Line: int(939), - Column: int(26), + Column: int(79), }, file: p1, }, - context: p10292, - freeVariables: nil, + context: p12907, + freeVariables: Identifiers{ + "cindent", + }, + }, + 
Left: &LiteralString{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(939), + Column: int(65), + }, + End: Location{ + Line: int(939), + Column: int(69), + }, + file: p1, + }, + context: p12907, + freeVariables: nil, + }, + Value: "\n", + Kind: LiteralStringKind(1), + BlockIndent: "", + }, + Op: BinaryOp(3), + Right: &Var{ + NodeBase: NodeBase{ + loc: LocationRange{ + FileName: "", + Begin: Location{ + Line: int(939), + Column: int(72), + }, + End: Location{ + Line: int(939), + Column: int(79), + }, + file: p1, + }, + context: p12907, + freeVariables: Identifiers{ + "cindent", + }, + }, + Id: "cindent", }, - Value: "\n", - Kind: LiteralStringKind(1), - BlockIndent: "", }, &Var{ NodeBase: NodeBase{ @@ -141114,15 +142830,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(939), - Column: int(28), + Column: int(81), }, End: Location{ Line: int(939), - Column: int(33), + Column: int(86), }, file: p1, }, - context: p10292, + context: p12907, freeVariables: Identifiers{ "lines", }, @@ -141179,11 +142895,11 @@ var StdAst = &DesugaredObject{ }, End: Location{ Line: int(940), - Column: int(37), + Column: int(30), }, file: p1, }, - context: p9728, + context: p12335, freeVariables: Identifiers{ "aux", "value", @@ -141203,7 +142919,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9728, + context: p12335, freeVariables: Identifiers{ "aux", }, @@ -141226,7 +142942,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10302, + context: p12921, freeVariables: Identifiers{ "value", }, @@ -141247,26 +142963,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10302, - freeVariables: nil, - }, - Value: false, - }, - &LiteralBoolean{ - NodeBase: NodeBase{ - loc: LocationRange{ - FileName: "", - Begin: Location{ - Line: int(940), - Column: int(23), - }, - End: Location{ - Line: int(940), - Column: int(28), - }, - file: p1, - }, - context: p10302, + context: p12921, freeVariables: nil, }, Value: false, @@ -141277,15 +142974,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(940), - Column: int(30), + Column: int(23), }, End: Location{ Line: int(940), - Column: int(32), + Column: int(25), }, file: p1, }, - context: p10302, + context: p12921, freeVariables: nil, }, Elements: nil, @@ -141297,15 +142994,15 @@ var StdAst = &DesugaredObject{ FileName: "", Begin: Location{ Line: int(940), - Column: int(34), + Column: int(27), }, End: Location{ Line: int(940), - Column: int(36), + Column: int(29), }, file: p1, }, - context: p10302, + context: p12921, freeVariables: nil, }, Value: "", @@ -141380,7 +143077,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12930, freeVariables: nil, }, }, @@ -141402,7 +143099,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12932, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -141644,7 +143341,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12956, freeVariables: Identifiers{ "base64_table", "i", @@ -141668,7 +143365,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12960, freeVariables: Identifiers{ "base64_table", "i", @@ -141688,7 +143385,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12963, freeVariables: Identifiers{ "base64_table", }, @@ -141709,7 +143406,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12966, 
freeVariables: Identifiers{ "i", }, @@ -141732,7 +143429,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p12969, freeVariables: Identifiers{ "i", }, @@ -141761,7 +143458,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12972, freeVariables: Identifiers{ "std", }, @@ -141780,7 +143477,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12975, freeVariables: Identifiers{ "std", }, @@ -141799,7 +143496,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p12978, freeVariables: Identifiers{ "std", }, @@ -141845,7 +143542,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p12983, freeVariables: nil, }, Value: float64(0), @@ -141865,14 +143562,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p12985, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -141928,7 +143625,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -141954,7 +143651,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", "value", @@ -142080,7 +143777,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", "value", @@ -142100,7 +143797,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", }, @@ -142119,7 +143816,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", }, @@ -142165,7 +143862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10335, + context: p13012, freeVariables: Identifiers{ "value", }, @@ -142192,7 +143889,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: nil, }, Value: "array", @@ -142220,7 +143917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", "value", @@ -142240,7 +143937,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", "value", @@ -142260,7 +143957,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: nil, }, Value: "manifestYamlStream only takes arrays, got ", @@ -142282,7 +143979,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", "value", @@ -142302,7 +143999,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", }, @@ -142321,7 +144018,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", }, @@ -142367,7 +144064,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10352, + context: p13029, freeVariables: Identifiers{ "value", }, @@ -142396,7 +144093,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", "value", @@ -142416,7 +144113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", "value", @@ -142436,7 +144133,7 @@ var StdAst = &DesugaredObject{ }, 
file: p1, }, - context: p10314, + context: p12991, freeVariables: nil, }, Value: "---\n", @@ -142458,7 +144155,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", "value", @@ -142478,7 +144175,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", }, @@ -142497,7 +144194,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: Identifiers{ "std", }, @@ -142543,7 +144240,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10368, + context: p13045, freeVariables: nil, }, Value: "\n---\n", @@ -142696,7 +144393,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10384, + context: p13061, freeVariables: Identifiers{ "e", "std", @@ -142716,7 +144413,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10384, + context: p13061, freeVariables: Identifiers{ "std", }, @@ -142735,7 +144432,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10384, + context: p13061, freeVariables: Identifiers{ "std", }, @@ -142781,7 +144478,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10393, + context: p13070, freeVariables: Identifiers{ "e", }, @@ -142812,7 +144509,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10368, + context: p13045, freeVariables: Identifiers{ "value", }, @@ -142847,7 +144544,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10314, + context: p12991, freeVariables: nil, }, Value: "\n...\n", @@ -142917,7 +144614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13079, freeVariables: nil, }, }, @@ -142939,7 +144636,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13081, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -143181,7 +144878,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13105, freeVariables: Identifiers{ "base64_table", "i", @@ -143205,7 +144902,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13109, freeVariables: Identifiers{ "base64_table", "i", @@ -143225,7 +144922,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13112, freeVariables: Identifiers{ "base64_table", }, @@ -143246,7 +144943,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13115, freeVariables: Identifiers{ "i", }, @@ -143269,7 +144966,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p13118, freeVariables: Identifiers{ "i", }, @@ -143298,7 +144995,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13121, freeVariables: Identifiers{ "std", }, @@ -143317,7 +145014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13124, freeVariables: Identifiers{ "std", }, @@ -143336,7 +145033,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13127, freeVariables: Identifiers{ "std", }, @@ -143382,7 +145079,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p13132, freeVariables: nil, }, Value: float64(0), @@ -143402,14 +145099,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p13134, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -143465,7 
+145162,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -143491,7 +145188,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -143596,7 +145293,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -143616,7 +145313,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -143635,7 +145332,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -143681,7 +145378,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10423, + context: p13159, freeVariables: Identifiers{ "o", }, @@ -143708,7 +145405,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "object", @@ -143735,7 +145432,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -143978,7 +145675,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10452, + context: p13188, freeVariables: nil, }, Value: "%s: %s", @@ -143999,7 +145696,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10452, + context: p13188, freeVariables: Identifiers{ "k", "o", @@ -144021,7 +145718,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10457, + context: p13193, freeVariables: Identifiers{ "k", "std", @@ -144041,7 +145738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10457, + context: p13193, freeVariables: Identifiers{ "std", }, @@ -144060,7 +145757,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10457, + context: p13193, freeVariables: Identifiers{ "std", }, @@ -144106,7 +145803,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10466, + context: p13202, freeVariables: Identifiers{ "k", }, @@ -144133,7 +145830,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10457, + context: p13193, freeVariables: Identifiers{ "k", "o", @@ -144154,7 +145851,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10457, + context: p13193, freeVariables: Identifiers{ "std", }, @@ -144173,7 +145870,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10457, + context: p13193, freeVariables: Identifiers{ "std", }, @@ -144219,7 +145916,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10477, + context: p13213, freeVariables: Identifiers{ "k", "o", @@ -144239,7 +145936,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10477, + context: p13213, freeVariables: Identifiers{ "o", }, @@ -144260,7 +145957,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10477, + context: p13213, freeVariables: Identifiers{ "k", }, @@ -144302,7 +145999,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10484, + context: p13220, freeVariables: Identifiers{ "o", "std", @@ -144322,7 +146019,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10484, + context: p13220, freeVariables: Identifiers{ "std", }, @@ -144341,7 +146038,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10484, + context: p13220, freeVariables: Identifiers{ "std", }, @@ -144387,7 +146084,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10493, + context: p13229, freeVariables: Identifiers{ 
"o", }, @@ -144508,7 +146205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "{%s}", @@ -144529,7 +146226,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "fields", "std", @@ -144550,7 +146247,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10508, + context: p13244, freeVariables: Identifiers{ "fields", "std", @@ -144570,7 +146267,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10508, + context: p13244, freeVariables: Identifiers{ "std", }, @@ -144589,7 +146286,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10508, + context: p13244, freeVariables: Identifiers{ "std", }, @@ -144635,7 +146332,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10517, + context: p13253, freeVariables: nil, }, Value: ", ", @@ -144656,7 +146353,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10517, + context: p13253, freeVariables: Identifiers{ "fields", }, @@ -144693,7 +146390,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -144798,7 +146495,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -144818,7 +146515,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -144837,7 +146534,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -144883,7 +146580,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10539, + context: p13275, freeVariables: Identifiers{ "o", }, @@ -144910,7 +146607,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "array", @@ -145022,7 +146719,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "[%s]", @@ -145043,7 +146740,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -145064,7 +146761,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10555, + context: p13291, freeVariables: Identifiers{ "o", "std", @@ -145084,7 +146781,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10555, + context: p13291, freeVariables: Identifiers{ "std", }, @@ -145103,7 +146800,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10555, + context: p13291, freeVariables: Identifiers{ "std", }, @@ -145149,7 +146846,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10564, + context: p13300, freeVariables: nil, }, Value: ", ", @@ -145302,7 +146999,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10580, + context: p13316, freeVariables: Identifiers{ "o2", "std", @@ -145322,7 +147019,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10580, + context: p13316, freeVariables: Identifiers{ "std", }, @@ -145341,7 +147038,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10580, + context: p13316, freeVariables: Identifiers{ "std", }, @@ -145387,7 +147084,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10589, + context: p13325, freeVariables: Identifiers{ "o2", }, @@ -145418,7 +147115,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10564, + 
context: p13300, freeVariables: Identifiers{ "o", }, @@ -145460,7 +147157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -145565,7 +147262,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -145585,7 +147282,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -145604,7 +147301,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -145650,7 +147347,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10612, + context: p13348, freeVariables: Identifiers{ "o", }, @@ -145677,7 +147374,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "string", @@ -145789,7 +147486,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "%s", @@ -145810,7 +147507,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -145831,7 +147528,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10628, + context: p13364, freeVariables: Identifiers{ "o", "std", @@ -145851,7 +147548,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10628, + context: p13364, freeVariables: Identifiers{ "std", }, @@ -145870,7 +147567,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10628, + context: p13364, freeVariables: Identifiers{ "std", }, @@ -145916,7 +147613,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10637, + context: p13373, freeVariables: Identifiers{ "o", }, @@ -145952,7 +147649,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -146057,7 +147754,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -146077,7 +147774,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -146096,7 +147793,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -146142,7 +147839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10658, + context: p13394, freeVariables: Identifiers{ "o", }, @@ -146169,7 +147866,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "function", @@ -146196,7 +147893,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Expr: &LiteralString{ @@ -146213,7 +147910,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "cannot manifest function", @@ -146235,7 +147932,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -146340,7 +148037,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -146360,7 +148057,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -146379,7 +148076,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -146425,7 +148122,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10682, + context: p13418, freeVariables: Identifiers{ "o", }, @@ -146452,7 +148149,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "number", @@ -146479,7 +148176,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -146499,7 +148196,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -146518,7 +148215,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "std", }, @@ -146564,7 +148261,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10694, + context: p13430, freeVariables: Identifiers{ "o", }, @@ -146591,7 +148288,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -146696,7 +148393,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", }, @@ -146717,7 +148414,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: true, @@ -146742,7 +148439,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "True", @@ -146763,7 +148460,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -146868,7 +148565,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", }, @@ -146889,7 +148586,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: false, @@ -146914,7 +148611,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "False", @@ -146935,7 +148632,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", "std", @@ -147040,7 +148737,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: Identifiers{ "o", }, @@ -147061,7 +148758,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, }, @@ -147085,7 +148782,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10404, + context: p13140, freeVariables: nil, }, Value: "None", @@ -147179,7 +148876,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13479, freeVariables: nil, }, }, @@ -147201,7 +148898,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13481, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -147443,7 +149140,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13505, freeVariables: Identifiers{ "base64_table", "i", @@ -147467,7 +149164,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13509, freeVariables: Identifiers{ "base64_table", "i", @@ -147487,7 +149184,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13512, 
freeVariables: Identifiers{ "base64_table", }, @@ -147508,7 +149205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13515, freeVariables: Identifiers{ "i", }, @@ -147531,7 +149228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p13518, freeVariables: Identifiers{ "i", }, @@ -147560,7 +149257,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13521, freeVariables: Identifiers{ "std", }, @@ -147579,7 +149276,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13524, freeVariables: Identifiers{ "std", }, @@ -147598,7 +149295,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13527, freeVariables: Identifiers{ "std", }, @@ -147644,7 +149341,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p13532, freeVariables: nil, }, Value: float64(0), @@ -147664,14 +149361,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p13534, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -147727,7 +149424,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -147753,7 +149450,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10745, + context: p13540, freeVariables: Identifiers{ "conf", "std", @@ -147996,7 +149693,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10771, + context: p13566, freeVariables: nil, }, Value: "%s = %s", @@ -148017,7 +149714,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10771, + context: p13566, freeVariables: Identifiers{ "conf", "k", @@ -148039,7 +149736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10776, + context: p13571, freeVariables: Identifiers{ "k", }, @@ -148060,7 +149757,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10776, + context: p13571, freeVariables: Identifiers{ "conf", "k", @@ -148081,7 +149778,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10776, + context: p13571, freeVariables: Identifiers{ "std", }, @@ -148100,7 +149797,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10776, + context: p13571, freeVariables: Identifiers{ "std", }, @@ -148146,7 +149843,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10787, + context: p13582, freeVariables: Identifiers{ "conf", "k", @@ -148166,7 +149863,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10787, + context: p13582, freeVariables: Identifiers{ "conf", }, @@ -148187,7 +149884,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10787, + context: p13582, freeVariables: Identifiers{ "k", }, @@ -148229,7 +149926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10794, + context: p13589, freeVariables: Identifiers{ "conf", "std", @@ -148249,7 +149946,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10794, + context: p13589, freeVariables: Identifiers{ "std", }, @@ -148268,7 +149965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10794, + context: p13589, freeVariables: Identifiers{ "std", }, @@ -148314,7 +150011,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10803, + context: p13598, freeVariables: Identifiers{ "conf", }, @@ -148350,7 +150047,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10745, + context: p13540, 
freeVariables: Identifiers{ "std", "vars", @@ -148370,7 +150067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10745, + context: p13540, freeVariables: Identifiers{ "std", }, @@ -148389,7 +150086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10745, + context: p13540, freeVariables: Identifiers{ "std", }, @@ -148435,7 +150132,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10814, + context: p13609, freeVariables: nil, }, Value: "\n", @@ -148456,7 +150153,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10814, + context: p13609, freeVariables: Identifiers{ "vars", }, @@ -148475,7 +150172,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10814, + context: p13609, freeVariables: Identifiers{ "vars", }, @@ -148497,7 +150194,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10814, + context: p13609, freeVariables: nil, }, Elements: Nodes{ @@ -148515,7 +150212,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10822, + context: p13617, freeVariables: nil, }, Value: "", @@ -148594,7 +150291,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13622, freeVariables: nil, }, }, @@ -148616,7 +150313,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13624, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -148858,7 +150555,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13648, freeVariables: Identifiers{ "base64_table", "i", @@ -148882,7 +150579,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13652, freeVariables: Identifiers{ "base64_table", "i", @@ -148902,7 +150599,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13655, freeVariables: Identifiers{ "base64_table", }, @@ -148923,7 +150620,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13658, freeVariables: Identifiers{ "i", }, @@ -148946,7 +150643,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p13661, freeVariables: Identifiers{ "i", }, @@ -148975,7 +150672,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13664, freeVariables: Identifiers{ "std", }, @@ -148994,7 +150691,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13667, freeVariables: Identifiers{ "std", }, @@ -149013,7 +150710,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13670, freeVariables: Identifiers{ "std", }, @@ -149059,7 +150756,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p13675, freeVariables: nil, }, Value: float64(0), @@ -149079,14 +150776,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p13677, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -149142,7 +150839,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -149168,7 +150865,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", "value", @@ -149188,7 +150885,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", "value", @@ -149209,7 +150906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p10829, + context: p13683, freeVariables: Identifiers{ "std", "value", @@ -149229,7 +150926,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", }, @@ -149248,7 +150945,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", }, @@ -149294,7 +150991,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10842, + context: p13696, freeVariables: Identifiers{ "value", }, @@ -149322,7 +151019,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", "value", @@ -149427,7 +151124,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: nil, }, Value: "Expected a JSONML value (an array), got %s", @@ -149448,7 +151145,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", "value", @@ -149468,7 +151165,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", }, @@ -149487,7 +151184,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", }, @@ -149533,7 +151230,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10864, + context: p13718, freeVariables: Identifiers{ "value", }, @@ -149567,7 +151264,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "std", "value", @@ -149590,7 +151287,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10870, + context: p13724, freeVariables: Identifiers{ "aux", "std", @@ -149617,7 +151314,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "aux", "std", @@ -149638,7 +151335,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "std", "v", @@ -149658,7 +151355,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "std", }, @@ -149677,7 +151374,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "std", }, @@ -149723,7 +151420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10885, + context: p13739, freeVariables: Identifiers{ "v", }, @@ -149750,7 +151447,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "v", }, @@ -149771,7 +151468,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "aux", "std", @@ -149795,7 +151492,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10893, + context: p13747, freeVariables: Identifiers{ "v", }, @@ -149814,7 +151511,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10893, + context: p13747, freeVariables: Identifiers{ "v", }, @@ -149835,7 +151532,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10893, + context: p13747, freeVariables: nil, }, Value: float64(0), @@ -149860,7 +151557,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "aux", "std", @@ -149885,7 +151582,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, 
freeVariables: Identifiers{ "std", "v", @@ -149905,7 +151602,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: Identifiers{ "std", "v", @@ -149925,7 +151622,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: Identifiers{ "std", "v", @@ -149945,7 +151642,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: Identifiers{ "std", }, @@ -149964,7 +151661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: Identifiers{ "std", }, @@ -150010,7 +151707,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10915, + context: p13769, freeVariables: Identifiers{ "v", }, @@ -150038,7 +151735,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: nil, }, Value: float64(1), @@ -150145,7 +151842,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: Identifiers{ "std", "v", @@ -150165,7 +151862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: Identifiers{ "std", }, @@ -150184,7 +151881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: Identifiers{ "std", }, @@ -150230,7 +151927,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10935, + context: p13789, freeVariables: Identifiers{ "v", }, @@ -150249,7 +151946,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10935, + context: p13789, freeVariables: Identifiers{ "v", }, @@ -150270,7 +151967,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10935, + context: p13789, freeVariables: nil, }, Value: float64(1), @@ -150298,7 +151995,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10902, + context: p13756, freeVariables: nil, }, Value: "object", @@ -150329,7 +152026,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "aux", "has_attrs", @@ -150355,7 +152052,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10945, + context: p13799, freeVariables: Identifiers{ "has_attrs", "v", @@ -150375,7 +152072,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10945, + context: p13799, freeVariables: Identifiers{ "has_attrs", }, @@ -150396,7 +152093,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10945, + context: p13799, freeVariables: Identifiers{ "v", }, @@ -150415,7 +152112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10945, + context: p13799, freeVariables: Identifiers{ "v", }, @@ -150436,7 +152133,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10945, + context: p13799, freeVariables: nil, }, Value: float64(1), @@ -150458,7 +152155,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10945, + context: p13799, freeVariables: nil, }, Asserts: nil, @@ -150482,7 +152179,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "attrs", "aux", @@ -150509,7 +152206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10959, + context: p13813, freeVariables: Identifiers{ "has_attrs", "std", @@ -150530,7 +152227,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10959, + context: p13813, freeVariables: Identifiers{ "has_attrs", }, @@ -150636,7 +152333,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p10959, + context: p13813, freeVariables: Identifiers{ "v", }, @@ -150657,7 +152354,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10959, + context: p13813, freeVariables: nil, }, Value: float64(2), @@ -150804,7 +152501,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10959, + context: p13813, freeVariables: Identifiers{ "v", }, @@ -150825,7 +152522,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10959, + context: p13813, freeVariables: nil, }, Value: float64(1), @@ -150891,7 +152588,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "attrs", "aux", @@ -150917,7 +152614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10993, + context: p13847, freeVariables: Identifiers{ "attrs", "std", @@ -150937,7 +152634,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10993, + context: p13847, freeVariables: Identifiers{ "std", }, @@ -150956,7 +152653,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10993, + context: p13847, freeVariables: Identifiers{ "std", }, @@ -151002,7 +152699,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11002, + context: p13856, freeVariables: nil, }, Value: "", @@ -151243,7 +152940,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11026, + context: p13880, freeVariables: nil, }, Value: " %s=\"%s\"", @@ -151264,7 +152961,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11026, + context: p13880, freeVariables: Identifiers{ "attrs", "k", @@ -151285,7 +152982,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11031, + context: p13885, freeVariables: Identifiers{ "k", }, @@ -151306,7 +153003,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11031, + context: p13885, freeVariables: Identifiers{ "attrs", "k", @@ -151326,7 +153023,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11031, + context: p13885, freeVariables: Identifiers{ "attrs", }, @@ -151347,7 +153044,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11031, + context: p13885, freeVariables: Identifiers{ "k", }, @@ -151383,7 +153080,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11002, + context: p13856, freeVariables: Identifiers{ "attrs", "std", @@ -151403,7 +153100,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11002, + context: p13856, freeVariables: Identifiers{ "std", }, @@ -151422,7 +153119,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11002, + context: p13856, freeVariables: Identifiers{ "std", }, @@ -151468,7 +153165,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11048, + context: p13902, freeVariables: Identifiers{ "attrs", }, @@ -151510,7 +153207,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "attrs_str", "aux", @@ -151533,7 +153230,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "std", }, @@ -151552,7 +153249,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10874, + context: p13728, freeVariables: Identifiers{ "std", }, @@ -151598,7 +153295,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11059, + context: p13913, freeVariables: Identifiers{ "attrs_str", "aux", @@ -151622,7 +153319,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11063, + context: 
p13917, freeVariables: nil, }, Value: "<", @@ -151643,7 +153340,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11063, + context: p13917, freeVariables: Identifiers{ "tag", }, @@ -151664,7 +153361,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11063, + context: p13917, freeVariables: Identifiers{ "attrs_str", }, @@ -151685,7 +153382,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11063, + context: p13917, freeVariables: nil, }, Value: ">", @@ -151839,7 +153536,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11084, + context: p13938, freeVariables: Identifiers{ "aux", "x", @@ -151859,7 +153556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11084, + context: p13938, freeVariables: Identifiers{ "aux", }, @@ -151882,7 +153579,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11090, + context: p13944, freeVariables: Identifiers{ "x", }, @@ -151913,7 +153610,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11063, + context: p13917, freeVariables: Identifiers{ "children", }, @@ -151940,7 +153637,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11063, + context: p13917, freeVariables: nil, }, Value: "", @@ -152022,7 +153719,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "aux", "value", @@ -152042,7 +153739,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p10829, + context: p13683, freeVariables: Identifiers{ "aux", }, @@ -152065,7 +153762,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11104, + context: p13958, freeVariables: Identifiers{ "value", }, @@ -152141,7 +153838,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13964, freeVariables: nil, }, }, @@ -152163,7 +153860,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13966, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -152405,7 +154102,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13990, freeVariables: Identifiers{ "base64_table", "i", @@ -152429,7 +154126,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13994, freeVariables: Identifiers{ "base64_table", "i", @@ -152449,7 +154146,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p13997, freeVariables: Identifiers{ "base64_table", }, @@ -152470,7 +154167,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14000, freeVariables: Identifiers{ "i", }, @@ -152493,7 +154190,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p14003, freeVariables: Identifiers{ "i", }, @@ -152522,7 +154219,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14006, freeVariables: Identifiers{ "std", }, @@ -152541,7 +154238,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14009, freeVariables: Identifiers{ "std", }, @@ -152560,7 +154257,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14012, freeVariables: Identifiers{ "std", }, @@ -152606,7 +154303,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p14017, freeVariables: nil, }, Value: float64(0), @@ -152626,14 +154323,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p14019, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - 
Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -152689,7 +154386,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "base64_table", "std", @@ -152716,7 +154413,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: Identifiers{ "base64_table", "input", @@ -152740,7 +154437,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: Identifiers{ "input", "std", @@ -152845,7 +154542,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: Identifiers{ "input", "std", @@ -152865,7 +154562,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: Identifiers{ "std", }, @@ -152884,7 +154581,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: Identifiers{ "std", }, @@ -152930,7 +154627,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11135, + context: p14048, freeVariables: Identifiers{ "input", }, @@ -152957,7 +154654,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: nil, }, Value: "string", @@ -152984,7 +154681,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: Identifiers{ "input", "std", @@ -153004,7 +154701,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: Identifiers{ "std", }, @@ -153023,7 +154720,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: Identifiers{ "std", }, @@ -153069,7 +154766,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11147, + context: p14060, freeVariables: Identifiers{ "std", }, @@ -153095,7 +154792,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11151, + context: p14064, freeVariables: Identifiers{ "c", "std", @@ -153115,7 +154812,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11151, + context: p14064, freeVariables: Identifiers{ "std", }, @@ -153134,7 +154831,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11151, + context: p14064, freeVariables: Identifiers{ "std", }, @@ -153180,7 +154877,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11160, + context: p14073, freeVariables: Identifiers{ "c", }, @@ -153208,7 +154905,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11147, + context: p14060, freeVariables: Identifiers{ "input", }, @@ -153235,7 +154932,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11116, + context: p14029, freeVariables: Identifiers{ "input", }, @@ -153260,7 +154957,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: Identifiers{ "base64_table", "bytes", @@ -153284,7 +154981,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11170, + context: p14083, freeVariables: Identifiers{ "aux", "base64_table", @@ -153314,7 +155011,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -153338,7 +155035,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "i", @@ -153359,7 +155056,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, 
freeVariables: Identifiers{ "i", }, @@ -153381,7 +155078,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "std", @@ -153401,7 +155098,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "std", }, @@ -153420,7 +155117,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "std", }, @@ -153466,7 +155163,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11189, + context: p14102, freeVariables: Identifiers{ "arr", }, @@ -153494,7 +155191,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "r", }, @@ -153515,7 +155212,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -153539,7 +155236,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "i", @@ -153560,7 +155257,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "i", }, @@ -153579,7 +155276,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "i", }, @@ -153601,7 +155298,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: nil, }, Value: float64(1), @@ -153623,7 +155320,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "std", @@ -153643,7 +155340,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "std", }, @@ -153662,7 +155359,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "std", }, @@ -153708,7 +155405,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11211, + context: p14124, freeVariables: Identifiers{ "arr", }, @@ -153736,7 +155433,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -153762,7 +155459,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "base64_table", @@ -153783,7 +155480,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "base64_table", @@ -153804,7 +155501,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "base64_table", @@ -153825,7 +155522,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "base64_table", }, @@ -153846,7 +155543,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "i", @@ -153866,7 +155563,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "i", @@ -153886,7 +155583,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "i", @@ -153906,7 +155603,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", }, @@ -153927,7 
+155624,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "i", }, @@ -153951,7 +155648,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: nil, }, Value: float64(252), @@ -153973,7 +155670,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: nil, }, Value: float64(2), @@ -153997,7 +155694,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "base64_table", @@ -154018,7 +155715,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "base64_table", }, @@ -154039,7 +155736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "i", @@ -154059,7 +155756,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "i", @@ -154079,7 +155776,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", "i", @@ -154099,7 +155796,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "arr", }, @@ -154120,7 +155817,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: Identifiers{ "i", }, @@ -154144,7 +155841,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: nil, }, Value: float64(3), @@ -154166,7 +155863,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: nil, }, Value: float64(4), @@ -154191,7 +155888,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11217, + context: p14130, freeVariables: nil, }, Value: "==", @@ -154216,7 +155913,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -154239,7 +155936,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "aux", }, @@ -154262,7 +155959,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11260, + context: p14173, freeVariables: Identifiers{ "arr", }, @@ -154283,7 +155980,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11260, + context: p14173, freeVariables: Identifiers{ "i", }, @@ -154302,7 +155999,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11260, + context: p14173, freeVariables: Identifiers{ "i", }, @@ -154324,7 +156021,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11260, + context: p14173, freeVariables: nil, }, Value: float64(3), @@ -154345,7 +156042,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11260, + context: p14173, freeVariables: Identifiers{ "r", "str", @@ -154365,7 +156062,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11260, + context: p14173, freeVariables: Identifiers{ "r", }, @@ -154387,7 +156084,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11260, + context: p14173, freeVariables: Identifiers{ "str", }, @@ -154416,7 +156113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -154440,7 +156137,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, 
+ context: p14087, freeVariables: Identifiers{ "arr", "i", @@ -154461,7 +156158,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "i", }, @@ -154480,7 +156177,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "i", }, @@ -154502,7 +156199,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: nil, }, Value: float64(2), @@ -154524,7 +156221,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "std", @@ -154544,7 +156241,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "std", }, @@ -154563,7 +156260,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "std", }, @@ -154609,7 +156306,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11291, + context: p14204, freeVariables: Identifiers{ "arr", }, @@ -154637,7 +156334,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -154663,7 +156360,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "base64_table", @@ -154684,7 +156381,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "base64_table", @@ -154705,7 +156402,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "base64_table", @@ -154726,7 +156423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "base64_table", @@ -154747,7 +156444,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "base64_table", }, @@ -154768,7 +156465,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -154788,7 +156485,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -154808,7 +156505,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -154828,7 +156525,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", }, @@ -154849,7 +156546,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "i", }, @@ -154873,7 +156570,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(252), @@ -154895,7 +156592,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(2), @@ -154919,7 +156616,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "base64_table", @@ -154940,7 +156637,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "base64_table", }, @@ -154961,7 +156658,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, 
freeVariables: Identifiers{ "arr", "i", @@ -154981,7 +156678,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155001,7 +156698,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155021,7 +156718,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155041,7 +156738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", }, @@ -155062,7 +156759,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "i", }, @@ -155086,7 +156783,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(3), @@ -155108,7 +156805,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(4), @@ -155130,7 +156827,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155150,7 +156847,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155170,7 +156867,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155190,7 +156887,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", }, @@ -155211,7 +156908,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "i", }, @@ -155230,7 +156927,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "i", }, @@ -155252,7 +156949,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(1), @@ -155276,7 +156973,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(240), @@ -155298,7 +156995,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(4), @@ -155324,7 +157021,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "base64_table", @@ -155345,7 +157042,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "base64_table", }, @@ -155366,7 +157063,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155386,7 +157083,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155406,7 +157103,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", "i", @@ -155426,7 +157123,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "arr", }, @@ -155447,7 +157144,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "i", }, @@ -155466,7 +157163,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: Identifiers{ "i", }, @@ -155488,7 +157185,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(1), @@ -155512,7 +157209,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(15), @@ -155534,7 +157231,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: float64(2), @@ -155559,7 +157256,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11297, + context: p14210, freeVariables: nil, }, Value: "=", @@ -155584,7 +157281,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -155607,7 +157304,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "aux", }, @@ -155630,7 +157327,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11378, + context: p14291, freeVariables: Identifiers{ "arr", }, @@ -155651,7 +157348,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11378, + context: p14291, freeVariables: Identifiers{ "i", }, @@ -155670,7 +157367,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11378, + context: p14291, freeVariables: Identifiers{ "i", }, @@ -155692,7 +157389,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11378, + context: p14291, freeVariables: nil, }, Value: float64(3), @@ -155713,7 +157410,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11378, + context: p14291, freeVariables: Identifiers{ "r", "str", @@ -155733,7 +157430,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11378, + context: p14291, freeVariables: Identifiers{ "r", }, @@ -155755,7 +157452,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11378, + context: p14291, freeVariables: Identifiers{ "str", }, @@ -155784,7 +157481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -155810,7 +157507,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "base64_table", @@ -155831,7 +157528,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "base64_table", @@ -155852,7 +157549,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "base64_table", @@ -155873,7 +157570,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "base64_table", @@ -155894,7 +157591,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "base64_table", }, @@ -155915,7 +157612,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -155935,7 +157632,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -155955,7 +157652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -155975,7 +157672,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - 
context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", }, @@ -155996,7 +157693,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -156020,7 +157717,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(252), @@ -156042,7 +157739,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(2), @@ -156066,7 +157763,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "base64_table", @@ -156087,7 +157784,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "base64_table", }, @@ -156108,7 +157805,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156128,7 +157825,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156148,7 +157845,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156168,7 +157865,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156188,7 +157885,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", }, @@ -156209,7 +157906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -156233,7 +157930,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(3), @@ -156255,7 +157952,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(4), @@ -156277,7 +157974,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156297,7 +157994,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156317,7 +158014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156337,7 +158034,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", }, @@ -156358,7 +158055,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -156377,7 +158074,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -156399,7 +158096,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(1), @@ -156423,7 +158120,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(240), @@ -156445,7 +158142,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(4), @@ -156471,7 +158168,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ 
"arr", "base64_table", @@ -156492,7 +158189,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "base64_table", }, @@ -156513,7 +158210,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156533,7 +158230,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156553,7 +158250,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156573,7 +158270,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156593,7 +158290,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", }, @@ -156614,7 +158311,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -156633,7 +158330,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -156655,7 +158352,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(1), @@ -156679,7 +158376,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(15), @@ -156701,7 +158398,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(2), @@ -156723,7 +158420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156743,7 +158440,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156763,7 +158460,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156783,7 +158480,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", }, @@ -156804,7 +158501,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -156823,7 +158520,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -156845,7 +158542,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(2), @@ -156869,7 +158566,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(192), @@ -156891,7 +158588,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(6), @@ -156917,7 +158614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "base64_table", @@ -156938,7 +158635,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "base64_table", }, @@ -156959,7 +158656,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156979,7 +158676,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", "i", @@ -156999,7 +158696,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "arr", }, @@ -157020,7 +158717,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -157039,7 +158736,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: Identifiers{ "i", }, @@ -157061,7 +158758,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(2), @@ -157085,7 +158782,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11395, + context: p14308, freeVariables: nil, }, Value: float64(63), @@ -157112,7 +158809,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "arr", "aux", @@ -157135,7 +158832,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11174, + context: p14087, freeVariables: Identifiers{ "aux", }, @@ -157158,7 +158855,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11508, + context: p14421, freeVariables: Identifiers{ "arr", }, @@ -157179,7 +158876,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11508, + context: p14421, freeVariables: Identifiers{ "i", }, @@ -157198,7 +158895,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11508, + context: p14421, freeVariables: Identifiers{ "i", }, @@ -157220,7 +158917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11508, + context: p14421, freeVariables: nil, }, Value: float64(3), @@ -157241,7 +158938,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11508, + context: p14421, freeVariables: Identifiers{ "r", "str", @@ -157261,7 +158958,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11508, + context: p14421, freeVariables: Identifiers{ "r", }, @@ -157283,7 +158980,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11508, + context: p14421, freeVariables: Identifiers{ "str", }, @@ -157319,7 +159016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: Identifiers{ "aux", "bytes", @@ -157343,7 +159040,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11525, + context: p14438, freeVariables: Identifiers{ "bytes", "std", @@ -157363,7 +159060,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11525, + context: p14438, freeVariables: Identifiers{ "std", }, @@ -157382,7 +159079,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11525, + context: p14438, freeVariables: Identifiers{ "std", }, @@ -157428,7 +159125,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11534, + context: p14447, freeVariables: nil, }, Parameters: Parameters{ @@ -157453,7 +159150,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11537, + context: p14450, freeVariables: Identifiers{ "a", "r", @@ -157473,7 +159170,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11537, + context: p14450, freeVariables: Identifiers{ "r", }, @@ -157495,7 +159192,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11537, + context: p14450, freeVariables: Identifiers{ "a", }, @@ -157514,7 +159211,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11537, + context: p14450, freeVariables: Identifiers{ 
"a", }, @@ -157536,7 +159233,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11537, + context: p14450, freeVariables: nil, }, Value: float64(256), @@ -157559,7 +159256,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11534, + context: p14447, freeVariables: Identifiers{ "bytes", }, @@ -157580,7 +159277,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11534, + context: p14447, freeVariables: nil, }, Value: true, @@ -157608,7 +159305,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: Identifiers{ "aux", "bytes", @@ -157629,7 +159326,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: Identifiers{ "sanity", }, @@ -157649,7 +159346,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: Identifiers{ "sanity", }, @@ -157671,7 +159368,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: nil, }, Expr: &LiteralString{ @@ -157688,7 +159385,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: nil, }, Value: "Can only base64 encode strings / arrays of single bytes.", @@ -157710,7 +159407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: Identifiers{ "aux", "bytes", @@ -157730,7 +159427,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11112, + context: p14025, freeVariables: Identifiers{ "aux", }, @@ -157753,7 +159450,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11563, + context: p14476, freeVariables: Identifiers{ "bytes", }, @@ -157774,7 +159471,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11563, + context: p14476, freeVariables: nil, }, Value: float64(0), @@ -157794,7 +159491,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11563, + context: p14476, freeVariables: nil, }, Value: "", @@ -157872,7 +159569,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14484, freeVariables: nil, }, }, @@ -157894,7 +159591,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14486, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -158136,7 +159833,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14510, freeVariables: Identifiers{ "base64_table", "i", @@ -158160,7 +159857,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14514, freeVariables: Identifiers{ "base64_table", "i", @@ -158180,7 +159877,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14517, freeVariables: Identifiers{ "base64_table", }, @@ -158201,7 +159898,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14520, freeVariables: Identifiers{ "i", }, @@ -158224,7 +159921,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p14523, freeVariables: Identifiers{ "i", }, @@ -158253,7 +159950,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14526, freeVariables: Identifiers{ "std", }, @@ -158272,7 +159969,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14529, freeVariables: Identifiers{ "std", }, @@ -158291,7 +159988,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14532, freeVariables: Identifiers{ "std", 
}, @@ -158337,7 +160034,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p14537, freeVariables: nil, }, Value: float64(0), @@ -158357,14 +160054,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p14539, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -158420,7 +160117,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "base64_inv", "std", @@ -158447,7 +160144,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: Identifiers{ "base64_inv", "std", @@ -158659,7 +160356,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: Identifiers{ "std", "str", @@ -158679,7 +160376,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: Identifiers{ "std", }, @@ -158698,7 +160395,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: Identifiers{ "std", }, @@ -158744,7 +160441,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11602, + context: p14574, freeVariables: Identifiers{ "str", }, @@ -158771,7 +160468,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: nil, }, Value: float64(4), @@ -158797,7 +160494,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: nil, }, Value: float64(0), @@ -158824,7 +160521,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: Identifiers{ "std", "str", @@ -158929,7 +160626,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: nil, }, Value: "Not a base64 encoded string \"%s\"", @@ -158950,7 +160647,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: Identifiers{ "str", }, @@ -158978,7 +160675,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: Identifiers{ "base64_inv", "std", @@ -159002,7 +160699,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11623, + context: p14595, freeVariables: Identifiers{ "aux", "base64_inv", @@ -159032,7 +160729,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "aux", "base64_inv", @@ -159056,7 +160753,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "i", "std", @@ -159077,7 +160774,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "i", }, @@ -159099,7 +160796,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "std", "str", @@ -159119,7 +160816,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "std", }, @@ -159138,7 +160835,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "std", }, @@ -159184,7 +160881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11642, + context: p14614, freeVariables: Identifiers{ "str", }, @@ -159212,7 +160909,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "r", }, @@ -159233,7 +160930,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "aux", "base64_inv", @@ -159260,7 +160957,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11650, + context: p14622, freeVariables: Identifiers{ "base64_inv", "i", @@ -159282,7 +160979,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "base64_inv", "i", @@ -159303,7 +161000,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "base64_inv", "i", @@ -159324,7 +161021,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "base64_inv", "i", @@ -159345,7 +161042,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "base64_inv", }, @@ -159366,7 +161063,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "i", "str", @@ -159386,7 +161083,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "str", }, @@ -159407,7 +161104,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "i", }, @@ -159433,7 +161130,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: nil, }, Value: float64(2), @@ -159455,7 +161152,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "base64_inv", "i", @@ -159476,7 +161173,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "base64_inv", "i", @@ -159497,7 +161194,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "base64_inv", }, @@ -159518,7 +161215,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "i", "str", @@ -159538,7 +161235,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "str", }, @@ -159559,7 +161256,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "i", }, @@ -159578,7 +161275,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: Identifiers{ "i", }, @@ -159600,7 +161297,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: nil, }, Value: float64(1), @@ -159626,7 +161323,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11654, + context: p14626, freeVariables: nil, }, Value: float64(4), @@ -159654,7 +161351,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "aux", "base64_inv", @@ -159682,7 +161379,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11689, + context: p14661, freeVariables: Identifiers{ "base64_inv", "i", @@ -159790,7 +161487,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11689, + context: p14661, freeVariables: Identifiers{ "i", "str", @@ -159810,7 +161507,7 @@ var StdAst = &DesugaredObject{ 
}, file: p1, }, - context: p11689, + context: p14661, freeVariables: Identifiers{ "str", }, @@ -159831,7 +161528,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11689, + context: p14661, freeVariables: Identifiers{ "i", }, @@ -159850,7 +161547,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11689, + context: p14661, freeVariables: Identifiers{ "i", }, @@ -159872,7 +161569,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11689, + context: p14661, freeVariables: nil, }, Value: float64(2), @@ -159895,7 +161592,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11689, + context: p14661, freeVariables: nil, }, Value: "=", @@ -159922,7 +161619,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11689, + context: p14661, freeVariables: nil, }, Elements: nil, @@ -159942,7 +161639,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11689, + context: p14661, freeVariables: Identifiers{ "base64_inv", "i", @@ -159964,7 +161661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "base64_inv", "i", @@ -159985,7 +161682,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "base64_inv", "i", @@ -160006,7 +161703,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "base64_inv", "i", @@ -160027,7 +161724,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "base64_inv", "i", @@ -160048,7 +161745,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "base64_inv", }, @@ -160069,7 +161766,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "i", "str", @@ -160089,7 +161786,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "str", }, @@ -160110,7 +161807,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "i", }, @@ -160129,7 +161826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "i", }, @@ -160151,7 +161848,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: nil, }, Value: float64(1), @@ -160177,7 +161874,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: nil, }, Value: float64(15), @@ -160199,7 +161896,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: nil, }, Value: float64(4), @@ -160221,7 +161918,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "base64_inv", "i", @@ -160242,7 +161939,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "base64_inv", "i", @@ -160263,7 +161960,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "base64_inv", }, @@ -160284,7 +161981,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "i", "str", @@ -160304,7 +162001,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + 
context: p14686, freeVariables: Identifiers{ "str", }, @@ -160325,7 +162022,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "i", }, @@ -160344,7 +162041,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: Identifiers{ "i", }, @@ -160366,7 +162063,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: nil, }, Value: float64(2), @@ -160392,7 +162089,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11714, + context: p14686, freeVariables: nil, }, Value: float64(2), @@ -160421,7 +162118,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "aux", "base64_inv", @@ -160450,7 +162147,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: Identifiers{ "base64_inv", "i", @@ -160558,7 +162255,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: Identifiers{ "i", "str", @@ -160578,7 +162275,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: Identifiers{ "str", }, @@ -160599,7 +162296,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: Identifiers{ "i", }, @@ -160618,7 +162315,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: Identifiers{ "i", }, @@ -160640,7 +162337,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: nil, }, Value: float64(3), @@ -160663,7 +162360,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: nil, }, Value: "=", @@ -160690,7 +162387,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: nil, }, Elements: nil, @@ -160710,7 +162407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11755, + context: p14727, freeVariables: Identifiers{ "base64_inv", "i", @@ -160732,7 +162429,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "base64_inv", "i", @@ -160753,7 +162450,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "base64_inv", "i", @@ -160774,7 +162471,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "base64_inv", "i", @@ -160795,7 +162492,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "base64_inv", "i", @@ -160816,7 +162513,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "base64_inv", }, @@ -160837,7 +162534,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "i", "str", @@ -160857,7 +162554,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "str", }, @@ -160878,7 +162575,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "i", }, @@ -160897,7 +162594,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "i", }, 
@@ -160919,7 +162616,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: nil, }, Value: float64(2), @@ -160945,7 +162642,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: nil, }, Value: float64(3), @@ -160967,7 +162664,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: nil, }, Value: float64(6), @@ -160989,7 +162686,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "base64_inv", "i", @@ -161010,7 +162707,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "base64_inv", }, @@ -161031,7 +162728,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "i", "str", @@ -161051,7 +162748,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "str", }, @@ -161072,7 +162769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "i", }, @@ -161091,7 +162788,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: Identifiers{ "i", }, @@ -161113,7 +162810,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11780, + context: p14752, freeVariables: nil, }, Value: float64(3), @@ -161146,7 +162843,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "aux", "i", @@ -161171,7 +162868,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11627, + context: p14599, freeVariables: Identifiers{ "aux", }, @@ -161194,7 +162891,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "str", }, @@ -161215,7 +162912,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "i", }, @@ -161234,7 +162931,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "i", }, @@ -161256,7 +162953,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: nil, }, Value: float64(4), @@ -161277,7 +162974,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "n1", "n2", @@ -161299,7 +162996,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "n1", "n2", @@ -161320,7 +163017,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "n1", "r", @@ -161340,7 +163037,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "r", }, @@ -161362,7 +163059,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "n1", }, @@ -161385,7 +163082,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "n2", }, @@ -161408,7 +163105,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11820, + context: p14792, freeVariables: Identifiers{ "n3", }, @@ -161444,7 +163141,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + 
context: p14545, freeVariables: Identifiers{ "aux", "str", @@ -161464,7 +163161,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11573, + context: p14545, freeVariables: Identifiers{ "aux", }, @@ -161487,7 +163184,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11847, + context: p14819, freeVariables: Identifiers{ "str", }, @@ -161508,7 +163205,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11847, + context: p14819, freeVariables: nil, }, Value: float64(0), @@ -161528,7 +163225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11847, + context: p14819, freeVariables: nil, }, Elements: nil, @@ -161603,7 +163300,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14827, freeVariables: nil, }, }, @@ -161625,7 +163322,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14829, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -161867,7 +163564,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14853, freeVariables: Identifiers{ "base64_table", "i", @@ -161891,7 +163588,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14857, freeVariables: Identifiers{ "base64_table", "i", @@ -161911,7 +163608,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14860, freeVariables: Identifiers{ "base64_table", }, @@ -161932,7 +163629,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14863, freeVariables: Identifiers{ "i", }, @@ -161955,7 +163652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p14866, freeVariables: Identifiers{ "i", }, @@ -161984,7 +163681,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14869, freeVariables: Identifiers{ "std", }, @@ -162003,7 +163700,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14872, freeVariables: Identifiers{ "std", }, @@ -162022,7 +163719,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14875, freeVariables: Identifiers{ "std", }, @@ -162068,7 +163765,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p14880, freeVariables: nil, }, Value: float64(0), @@ -162088,14 +163785,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p14882, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -162151,7 +163848,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -162177,7 +163874,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11857, + context: p14888, freeVariables: Identifiers{ "std", "str", @@ -162200,7 +163897,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11861, + context: p14892, freeVariables: Identifiers{ "std", "str", @@ -162220,7 +163917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11861, + context: p14892, freeVariables: Identifiers{ "std", }, @@ -162239,7 +163936,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11861, + context: p14892, freeVariables: Identifiers{ "std", }, @@ -162285,7 +163982,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11870, + context: p14901, freeVariables: Identifiers{ "str", }, @@ -162315,7 +164012,7 @@ var StdAst = &DesugaredObject{ 
}, file: p1, }, - context: p11857, + context: p14888, freeVariables: Identifiers{ "bytes", "std", @@ -162335,7 +164032,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11857, + context: p14888, freeVariables: Identifiers{ "std", }, @@ -162354,7 +164051,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11857, + context: p14888, freeVariables: Identifiers{ "std", }, @@ -162400,7 +164097,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11881, + context: p14912, freeVariables: nil, }, Value: "", @@ -162421,7 +164118,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11881, + context: p14912, freeVariables: Identifiers{ "bytes", "std", @@ -162441,7 +164138,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11881, + context: p14912, freeVariables: Identifiers{ "std", }, @@ -162460,7 +164157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11881, + context: p14912, freeVariables: Identifiers{ "std", }, @@ -162506,7 +164203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11891, + context: p14922, freeVariables: Identifiers{ "std", }, @@ -162532,7 +164229,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11895, + context: p14926, freeVariables: Identifiers{ "b", "std", @@ -162552,7 +164249,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11895, + context: p14926, freeVariables: Identifiers{ "std", }, @@ -162571,7 +164268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11895, + context: p14926, freeVariables: Identifiers{ "std", }, @@ -162617,7 +164314,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11904, + context: p14935, freeVariables: Identifiers{ "b", }, @@ -162645,7 +164342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11891, + context: p14922, freeVariables: Identifiers{ "bytes", }, @@ -162726,7 +164423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14943, freeVariables: nil, }, }, @@ -162748,7 +164445,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14945, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -162990,7 +164687,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14969, freeVariables: Identifiers{ "base64_table", "i", @@ -163014,7 +164711,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14973, freeVariables: Identifiers{ "base64_table", "i", @@ -163034,7 +164731,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14976, freeVariables: Identifiers{ "base64_table", }, @@ -163055,7 +164752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14979, freeVariables: Identifiers{ "i", }, @@ -163078,7 +164775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p14982, freeVariables: Identifiers{ "i", }, @@ -163107,7 +164804,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14985, freeVariables: Identifiers{ "std", }, @@ -163126,7 +164823,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14988, freeVariables: Identifiers{ "std", }, @@ -163145,7 +164842,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p14991, freeVariables: Identifiers{ "std", }, @@ -163191,7 +164888,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p14996, freeVariables: nil, }, Value: 
float64(0), @@ -163211,14 +164908,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p14998, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -163274,7 +164971,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -163300,7 +164997,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "arr", "std", @@ -163323,7 +165020,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11918, + context: p15008, freeVariables: Identifiers{ "arr", "std", @@ -163343,7 +165040,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11918, + context: p15008, freeVariables: Identifiers{ "std", }, @@ -163362,7 +165059,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11918, + context: p15008, freeVariables: Identifiers{ "std", }, @@ -163408,7 +165105,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11927, + context: p15017, freeVariables: Identifiers{ "arr", }, @@ -163438,7 +165135,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "arr", "l", @@ -163544,7 +165241,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "arr", "std", @@ -163564,7 +165261,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "std", }, @@ -163583,7 +165280,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "std", }, @@ -163629,7 +165326,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11948, + context: p15038, freeVariables: Identifiers{ "arr", }, @@ -163656,7 +165353,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: nil, }, Value: float64(0), @@ -163682,7 +165379,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: nil, }, Elements: nil, @@ -163702,7 +165399,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "arr", "l", @@ -163726,7 +165423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11956, + context: p15046, freeVariables: Identifiers{ "arr", }, @@ -163745,7 +165442,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11956, + context: p15046, freeVariables: Identifiers{ "arr", }, @@ -163766,7 +165463,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11956, + context: p15046, freeVariables: nil, }, Value: float64(0), @@ -163791,7 +165488,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "arr", "l", @@ -163816,7 +165513,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11965, + context: p15055, freeVariables: Identifiers{ "arr", "l", @@ -163837,7 +165534,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11965, + context: p15055, freeVariables: Identifiers{ "std", }, @@ -163856,7 +165553,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11965, + context: p15055, freeVariables: Identifiers{ "std", }, @@ -163902,7 +165599,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11974, + context: p15064, 
freeVariables: Identifiers{ "l", }, @@ -163921,7 +165618,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11974, + context: p15064, freeVariables: Identifiers{ "l", }, @@ -163943,7 +165640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11974, + context: p15064, freeVariables: nil, }, Value: float64(1), @@ -163964,7 +165661,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11974, + context: p15064, freeVariables: Identifiers{ "arr", }, @@ -163990,7 +165687,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11983, + context: p15073, freeVariables: Identifiers{ "arr", "i", @@ -164010,7 +165707,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11983, + context: p15073, freeVariables: Identifiers{ "arr", }, @@ -164031,7 +165728,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11983, + context: p15073, freeVariables: Identifiers{ "i", }, @@ -164050,7 +165747,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11983, + context: p15073, freeVariables: Identifiers{ "i", }, @@ -164072,7 +165769,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11983, + context: p15073, freeVariables: nil, }, Value: float64(1), @@ -164105,7 +165802,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "pivot", "rest", @@ -164129,7 +165826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11996, + context: p15086, freeVariables: Identifiers{ "pivot", "rest", @@ -164150,7 +165847,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11996, + context: p15086, freeVariables: Identifiers{ "std", }, @@ -164169,7 +165866,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11996, + context: p15086, freeVariables: Identifiers{ "std", }, @@ -164215,7 +165912,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12005, + context: p15095, freeVariables: Identifiers{ "pivot", }, @@ -164241,7 +165938,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12009, + context: p15099, freeVariables: Identifiers{ "pivot", "x", @@ -164261,7 +165958,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12009, + context: p15099, freeVariables: Identifiers{ "x", }, @@ -164283,7 +165980,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12009, + context: p15099, freeVariables: Identifiers{ "pivot", }, @@ -164306,7 +166003,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12005, + context: p15095, freeVariables: Identifiers{ "rest", }, @@ -164336,7 +166033,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "left", "pivot", @@ -164361,7 +166058,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12021, + context: p15111, freeVariables: Identifiers{ "pivot", "rest", @@ -164382,7 +166079,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12021, + context: p15111, freeVariables: Identifiers{ "std", }, @@ -164401,7 +166098,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12021, + context: p15111, freeVariables: Identifiers{ "std", }, @@ -164447,7 +166144,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12030, + context: p15120, freeVariables: Identifiers{ "pivot", }, @@ -164473,7 +166170,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12034, + context: p15124, freeVariables: Identifiers{ "pivot", "x", @@ -164493,7 +166190,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p12034, + context: p15124, freeVariables: Identifiers{ "x", }, @@ -164515,7 +166212,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12034, + context: p15124, freeVariables: Identifiers{ "pivot", }, @@ -164538,7 +166235,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12030, + context: p15120, freeVariables: Identifiers{ "rest", }, @@ -164568,7 +166265,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "left", "pivot", @@ -164590,7 +166287,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "left", "pivot", @@ -164611,7 +166308,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "left", "std", @@ -164631,7 +166328,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "std", }, @@ -164650,7 +166347,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "std", }, @@ -164696,7 +166393,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12055, + context: p15145, freeVariables: Identifiers{ "left", }, @@ -164724,7 +166421,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "pivot", }, @@ -164744,7 +166441,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12061, + context: p15151, freeVariables: Identifiers{ "pivot", }, @@ -164770,7 +166467,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "right", "std", @@ -164790,7 +166487,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "std", }, @@ -164809,7 +166506,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p11914, + context: p15004, freeVariables: Identifiers{ "std", }, @@ -164855,7 +166552,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12072, + context: p15162, freeVariables: Identifiers{ "right", }, @@ -164936,7 +166633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15168, freeVariables: nil, }, }, @@ -164958,7 +166655,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15170, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -165200,7 +166897,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15194, freeVariables: Identifiers{ "base64_table", "i", @@ -165224,7 +166921,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15198, freeVariables: Identifiers{ "base64_table", "i", @@ -165244,7 +166941,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15201, freeVariables: Identifiers{ "base64_table", }, @@ -165265,7 +166962,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15204, freeVariables: Identifiers{ "i", }, @@ -165288,7 +166985,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p15207, freeVariables: Identifiers{ "i", }, @@ -165317,7 +167014,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15210, freeVariables: Identifiers{ "std", }, @@ -165336,7 +167033,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + 
context: p15213, freeVariables: Identifiers{ "std", }, @@ -165355,7 +167052,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15216, freeVariables: Identifiers{ "std", }, @@ -165401,7 +167098,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15221, freeVariables: nil, }, Value: float64(0), @@ -165421,14 +167118,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15223, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -165484,7 +167181,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -165510,7 +167207,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12080, + context: p15229, freeVariables: Identifiers{ "arr", "std", @@ -165533,7 +167230,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12084, + context: p15233, freeVariables: Identifiers{ "std", }, @@ -165560,7 +167257,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", "b", @@ -165666,7 +167363,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", "std", @@ -165686,7 +167383,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "std", }, @@ -165705,7 +167402,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "std", }, @@ -165751,7 +167448,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12107, + context: p15256, freeVariables: Identifiers{ "a", }, @@ -165778,7 +167475,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: nil, }, Value: float64(0), @@ -165804,7 +167501,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "b", }, @@ -165824,7 +167521,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12114, + context: p15263, freeVariables: Identifiers{ "b", }, @@ -165848,7 +167545,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", "b", @@ -165955,7 +167652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", "std", @@ -165975,7 +167672,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", }, @@ -165996,7 +167693,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", "std", @@ -166016,7 +167713,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", "std", @@ -166036,7 +167733,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "std", }, @@ -166055,7 +167752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "std", }, @@ -166101,7 +167798,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12141, + context: p15290, freeVariables: Identifiers{ "a", }, @@ -166129,7 +167826,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: 
p15237, freeVariables: nil, }, Value: float64(1), @@ -166152,7 +167849,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "b", }, @@ -166179,7 +167876,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", }, @@ -166200,7 +167897,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", "b", @@ -166220,7 +167917,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "a", }, @@ -166242,7 +167939,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12088, + context: p15237, freeVariables: Identifiers{ "b", }, @@ -166262,7 +167959,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12156, + context: p15305, freeVariables: Identifiers{ "b", }, @@ -166293,7 +167990,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12080, + context: p15229, freeVariables: Identifiers{ "arr", "f", @@ -166314,7 +168011,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12080, + context: p15229, freeVariables: Identifiers{ "std", }, @@ -166333,7 +168030,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12080, + context: p15229, freeVariables: Identifiers{ "std", }, @@ -166379,7 +168076,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12167, + context: p15316, freeVariables: Identifiers{ "f", }, @@ -166400,7 +168097,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12167, + context: p15316, freeVariables: Identifiers{ "arr", }, @@ -166421,7 +168118,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12167, + context: p15316, freeVariables: nil, }, Elements: nil, @@ -166495,7 +168192,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15325, freeVariables: nil, }, }, @@ -166517,7 +168214,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15327, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -166759,7 +168456,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15351, freeVariables: Identifiers{ "base64_table", "i", @@ -166783,7 +168480,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15355, freeVariables: Identifiers{ "base64_table", "i", @@ -166803,7 +168500,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15358, freeVariables: Identifiers{ "base64_table", }, @@ -166824,7 +168521,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15361, freeVariables: Identifiers{ "i", }, @@ -166847,7 +168544,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p15364, freeVariables: Identifiers{ "i", }, @@ -166876,7 +168573,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15367, freeVariables: Identifiers{ "std", }, @@ -166895,7 +168592,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15370, freeVariables: Identifiers{ "std", }, @@ -166914,7 +168611,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15373, freeVariables: Identifiers{ "std", }, @@ -166960,7 +168657,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15378, freeVariables: nil, }, Value: float64(0), @@ -166980,14 +168677,14 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p54, + context: p15380, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -167043,7 +168740,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -167069,7 +168766,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12178, + context: p15386, freeVariables: Identifiers{ "arr", "std", @@ -167089,7 +168786,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12178, + context: p15386, freeVariables: Identifiers{ "std", }, @@ -167108,7 +168805,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12178, + context: p15386, freeVariables: Identifiers{ "std", }, @@ -167154,7 +168851,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12187, + context: p15395, freeVariables: Identifiers{ "arr", "std", @@ -167174,7 +168871,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12187, + context: p15395, freeVariables: Identifiers{ "std", }, @@ -167193,7 +168890,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12187, + context: p15395, freeVariables: Identifiers{ "std", }, @@ -167239,7 +168936,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12196, + context: p15404, freeVariables: Identifiers{ "arr", }, @@ -167319,7 +169016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15410, freeVariables: nil, }, }, @@ -167341,7 +169038,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15412, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -167583,7 +169280,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15436, freeVariables: Identifiers{ "base64_table", "i", @@ -167607,7 +169304,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15440, freeVariables: Identifiers{ "base64_table", "i", @@ -167627,7 +169324,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15443, freeVariables: Identifiers{ "base64_table", }, @@ -167648,7 +169345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15446, freeVariables: Identifiers{ "i", }, @@ -167671,7 +169368,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p15449, freeVariables: Identifiers{ "i", }, @@ -167700,7 +169397,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15452, freeVariables: Identifiers{ "std", }, @@ -167719,7 +169416,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15455, freeVariables: Identifiers{ "std", }, @@ -167738,7 +169435,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15458, freeVariables: Identifiers{ "std", }, @@ -167784,7 +169481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15463, freeVariables: nil, }, Value: float64(0), @@ -167804,14 +169501,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15465, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -167867,7 +169564,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -167894,7 +169591,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p12204, + context: p15471, freeVariables: Identifiers{ "arr", "std", @@ -167915,7 +169612,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12204, + context: p15471, freeVariables: Identifiers{ "arr", "std", @@ -167936,7 +169633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12204, + context: p15471, freeVariables: Identifiers{ "std", }, @@ -167955,7 +169652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12204, + context: p15471, freeVariables: Identifiers{ "std", }, @@ -168001,7 +169698,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12215, + context: p15482, freeVariables: Identifiers{ "arr", "std", @@ -168022,7 +169719,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12215, + context: p15482, freeVariables: Identifiers{ "std", }, @@ -168041,7 +169738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12215, + context: p15482, freeVariables: Identifiers{ "std", }, @@ -168087,7 +169784,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12224, + context: p15491, freeVariables: Identifiers{ "x", }, @@ -168107,7 +169804,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12228, + context: p15495, freeVariables: Identifiers{ "x", }, @@ -168131,7 +169828,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12224, + context: p15491, freeVariables: Identifiers{ "arr", }, @@ -168165,7 +169862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12204, + context: p15471, freeVariables: nil, }, Value: float64(0), @@ -168233,7 +169930,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15504, freeVariables: nil, }, }, @@ -168255,7 +169952,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15506, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -168497,7 +170194,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15530, freeVariables: Identifiers{ "base64_table", "i", @@ -168521,7 +170218,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15534, freeVariables: Identifiers{ "base64_table", "i", @@ -168541,7 +170238,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15537, freeVariables: Identifiers{ "base64_table", }, @@ -168562,7 +170259,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15540, freeVariables: Identifiers{ "i", }, @@ -168585,7 +170282,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p15543, freeVariables: Identifiers{ "i", }, @@ -168614,7 +170311,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15546, freeVariables: Identifiers{ "std", }, @@ -168633,7 +170330,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15549, freeVariables: Identifiers{ "std", }, @@ -168652,7 +170349,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15552, freeVariables: Identifiers{ "std", }, @@ -168698,7 +170395,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15557, freeVariables: nil, }, Value: float64(0), @@ -168718,14 +170415,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15559, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, 
@@ -168781,7 +170478,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -168808,7 +170505,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12239, + context: p15565, freeVariables: Identifiers{ "a", "b", @@ -168829,7 +170526,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12239, + context: p15565, freeVariables: Identifiers{ "std", }, @@ -168848,7 +170545,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12239, + context: p15565, freeVariables: Identifiers{ "std", }, @@ -168894,7 +170591,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12248, + context: p15574, freeVariables: Identifiers{ "a", "b", @@ -168914,7 +170611,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12248, + context: p15574, freeVariables: Identifiers{ "a", }, @@ -168936,7 +170633,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12248, + context: p15574, freeVariables: Identifiers{ "b", }, @@ -169011,7 +170708,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15584, freeVariables: nil, }, }, @@ -169033,7 +170730,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15586, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -169275,7 +170972,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15610, freeVariables: Identifiers{ "base64_table", "i", @@ -169299,7 +170996,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15614, freeVariables: Identifiers{ "base64_table", "i", @@ -169319,7 +171016,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15617, freeVariables: Identifiers{ "base64_table", }, @@ -169340,7 +171037,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15620, freeVariables: Identifiers{ "i", }, @@ -169363,7 +171060,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p15623, freeVariables: Identifiers{ "i", }, @@ -169392,7 +171089,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15626, freeVariables: Identifiers{ "std", }, @@ -169411,7 +171108,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15629, freeVariables: Identifiers{ "std", }, @@ -169430,7 +171127,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15632, freeVariables: Identifiers{ "std", }, @@ -169476,7 +171173,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15637, freeVariables: nil, }, Value: float64(0), @@ -169496,14 +171193,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15639, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -169559,7 +171256,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -169586,7 +171283,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12260, + context: p15645, freeVariables: Identifiers{ "a", "b", @@ -169610,7 +171307,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12264, + context: p15649, freeVariables: Identifiers{ "aux", "std", @@ -169641,7 +171338,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: 
Identifiers{ "a", "acc", @@ -169666,7 +171363,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "b", @@ -169689,7 +171386,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "i", @@ -169710,7 +171407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "i", }, @@ -169732,7 +171429,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "std", @@ -169752,7 +171449,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "std", }, @@ -169771,7 +171468,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "std", }, @@ -169817,7 +171514,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12285, + context: p15670, freeVariables: Identifiers{ "a", }, @@ -169846,7 +171543,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "b", "j", @@ -169867,7 +171564,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "j", }, @@ -169889,7 +171586,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "b", "std", @@ -169909,7 +171606,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "std", }, @@ -169928,7 +171625,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "std", }, @@ -169974,7 +171671,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12300, + context: p15685, freeVariables: Identifiers{ "b", }, @@ -170003,7 +171700,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "acc", }, @@ -170024,7 +171721,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "acc", @@ -170137,7 +171834,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "i", @@ -170157,7 +171854,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", }, @@ -170178,7 +171875,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "i", }, @@ -170201,7 +171898,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "b", "j", @@ -170221,7 +171918,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "b", }, @@ -170242,7 +171939,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "j", }, @@ -170271,7 +171968,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "acc", @@ -170295,7 +171992,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "aux", }, @@ -170318,7 +172015,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, 
freeVariables: Identifiers{ "a", }, @@ -170339,7 +172036,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: Identifiers{ "b", }, @@ -170360,7 +172057,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: Identifiers{ "i", }, @@ -170379,7 +172076,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: Identifiers{ "i", }, @@ -170401,7 +172098,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: nil, }, Value: float64(1), @@ -170422,7 +172119,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: Identifiers{ "j", }, @@ -170441,7 +172138,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: Identifiers{ "j", }, @@ -170463,7 +172160,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: nil, }, Value: float64(1), @@ -170484,7 +172181,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: Identifiers{ "a", "acc", @@ -170505,7 +172202,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: Identifiers{ "acc", }, @@ -170527,7 +172224,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12332, + context: p15717, freeVariables: Identifiers{ "a", "i", @@ -170548,7 +172245,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12354, + context: p15739, freeVariables: Identifiers{ "a", "i", @@ -170568,7 +172265,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12354, + context: p15739, freeVariables: Identifiers{ "a", }, @@ -170589,7 +172286,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12354, + context: p15739, freeVariables: Identifiers{ "i", }, @@ -170622,7 +172319,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "acc", @@ -170646,7 +172343,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "b", @@ -170668,7 +172365,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "i", @@ -170688,7 +172385,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", }, @@ -170709,7 +172406,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "i", }, @@ -170733,7 +172430,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "b", "j", @@ -170753,7 +172450,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "b", }, @@ -170774,7 +172471,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "j", }, @@ -170798,7 +172495,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "acc", @@ -170822,7 +172519,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "aux", }, @@ -170845,7 +172542,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12382, + 
context: p15767, freeVariables: Identifiers{ "a", }, @@ -170866,7 +172563,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12382, + context: p15767, freeVariables: Identifiers{ "b", }, @@ -170887,7 +172584,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12382, + context: p15767, freeVariables: Identifiers{ "i", }, @@ -170906,7 +172603,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12382, + context: p15767, freeVariables: Identifiers{ "i", }, @@ -170928,7 +172625,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12382, + context: p15767, freeVariables: nil, }, Value: float64(1), @@ -170949,7 +172646,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12382, + context: p15767, freeVariables: Identifiers{ "j", }, @@ -170970,7 +172667,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12382, + context: p15767, freeVariables: Identifiers{ "acc", }, @@ -170997,7 +172694,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "a", "acc", @@ -171021,7 +172718,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12268, + context: p15653, freeVariables: Identifiers{ "aux", }, @@ -171044,7 +172741,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12401, + context: p15786, freeVariables: Identifiers{ "a", }, @@ -171065,7 +172762,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12401, + context: p15786, freeVariables: Identifiers{ "b", }, @@ -171086,7 +172783,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12401, + context: p15786, freeVariables: Identifiers{ "i", }, @@ -171107,7 +172804,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12401, + context: p15786, freeVariables: Identifiers{ "j", }, @@ -171126,7 +172823,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12401, + context: p15786, freeVariables: Identifiers{ "j", }, @@ -171148,7 +172845,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12401, + context: p15786, freeVariables: nil, }, Value: float64(1), @@ -171169,7 +172866,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12401, + context: p15786, freeVariables: Identifiers{ "acc", }, @@ -171203,7 +172900,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12260, + context: p15645, freeVariables: Identifiers{ "a", "aux", @@ -171224,7 +172921,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12260, + context: p15645, freeVariables: Identifiers{ "aux", }, @@ -171247,7 +172944,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12420, + context: p15805, freeVariables: Identifiers{ "a", }, @@ -171268,7 +172965,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12420, + context: p15805, freeVariables: Identifiers{ "b", }, @@ -171289,7 +172986,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12420, + context: p15805, freeVariables: nil, }, Value: float64(0), @@ -171309,7 +173006,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12420, + context: p15805, freeVariables: nil, }, Value: float64(0), @@ -171329,7 +173026,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12420, + context: p15805, freeVariables: nil, }, Elements: nil, @@ -171403,7 +173100,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15816, freeVariables: nil, }, }, @@ -171425,7 +173122,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + 
context: p15818, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -171667,7 +173364,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15842, freeVariables: Identifiers{ "base64_table", "i", @@ -171691,7 +173388,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15846, freeVariables: Identifiers{ "base64_table", "i", @@ -171711,7 +173408,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15849, freeVariables: Identifiers{ "base64_table", }, @@ -171732,7 +173429,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15852, freeVariables: Identifiers{ "i", }, @@ -171755,7 +173452,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p15855, freeVariables: Identifiers{ "i", }, @@ -171784,7 +173481,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15858, freeVariables: Identifiers{ "std", }, @@ -171803,7 +173500,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15861, freeVariables: Identifiers{ "std", }, @@ -171822,7 +173519,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p15864, freeVariables: Identifiers{ "std", }, @@ -171868,7 +173565,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15869, freeVariables: nil, }, Value: float64(0), @@ -171888,14 +173585,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p15871, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -171951,7 +173648,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -171978,7 +173675,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12433, + context: p15877, freeVariables: Identifiers{ "a", "b", @@ -172002,7 +173699,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12437, + context: p15881, freeVariables: Identifiers{ "aux", "std", @@ -172033,7 +173730,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "acc", @@ -172058,7 +173755,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "i", @@ -172079,7 +173776,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "i", }, @@ -172101,7 +173798,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "std", @@ -172121,7 +173818,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "std", }, @@ -172140,7 +173837,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "std", }, @@ -172186,7 +173883,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12456, + context: p15900, freeVariables: Identifiers{ "a", }, @@ -172214,7 +173911,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "acc", }, @@ -172235,7 +173932,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "acc", @@ -172260,7 +173957,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "b", "j", @@ -172281,7 +173978,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "j", }, @@ -172303,7 +174000,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "b", "std", @@ -172323,7 +174020,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "std", }, @@ -172342,7 +174039,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "std", }, @@ -172388,7 +174085,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12475, + context: p15919, freeVariables: Identifiers{ "b", }, @@ -172416,7 +174113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "acc", @@ -172440,7 +174137,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "aux", }, @@ -172463,7 +174160,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: Identifiers{ "a", }, @@ -172484,7 +174181,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: Identifiers{ "b", }, @@ -172505,7 +174202,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: Identifiers{ "i", }, @@ -172524,7 +174221,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: Identifiers{ "i", }, @@ -172546,7 +174243,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: nil, }, Value: float64(1), @@ -172567,7 +174264,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: Identifiers{ "j", }, @@ -172588,7 +174285,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: Identifiers{ "a", "acc", @@ -172609,7 +174306,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: Identifiers{ "acc", }, @@ -172631,7 +174328,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12483, + context: p15927, freeVariables: Identifiers{ "a", "i", @@ -172652,7 +174349,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12502, + context: p15946, freeVariables: Identifiers{ "a", "i", @@ -172672,7 +174369,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12502, + context: p15946, freeVariables: Identifiers{ "a", }, @@ -172693,7 +174390,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12502, + context: p15946, freeVariables: Identifiers{ "i", }, @@ -172726,7 +174423,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "acc", @@ -172839,7 +174536,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "i", @@ -172859,7 +174556,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", }, @@ -172880,7 +174577,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "i", }, @@ -172903,7 +174600,7 @@ 
var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "b", "j", @@ -172923,7 +174620,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "b", }, @@ -172944,7 +174641,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "j", }, @@ -172973,7 +174670,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "acc", @@ -172997,7 +174694,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "aux", }, @@ -173020,7 +174717,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: Identifiers{ "a", }, @@ -173041,7 +174738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: Identifiers{ "b", }, @@ -173062,7 +174759,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: Identifiers{ "i", }, @@ -173081,7 +174778,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: Identifiers{ "i", }, @@ -173103,7 +174800,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: nil, }, Value: float64(1), @@ -173124,7 +174821,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: Identifiers{ "j", }, @@ -173143,7 +174840,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: Identifiers{ "j", }, @@ -173165,7 +174862,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: nil, }, Value: float64(1), @@ -173186,7 +174883,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12536, + context: p15980, freeVariables: Identifiers{ "acc", }, @@ -173213,7 +174910,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "acc", @@ -173237,7 +174934,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "b", @@ -173259,7 +174956,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "i", @@ -173279,7 +174976,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", }, @@ -173300,7 +174997,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "i", }, @@ -173324,7 +175021,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "b", "j", @@ -173344,7 +175041,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "b", }, @@ -173365,7 +175062,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "j", }, @@ -173389,7 +175086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "acc", @@ -173413,7 +175110,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "aux", }, @@ -173436,7 
+175133,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: Identifiers{ "a", }, @@ -173457,7 +175154,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: Identifiers{ "b", }, @@ -173478,7 +175175,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: Identifiers{ "i", }, @@ -173497,7 +175194,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: Identifiers{ "i", }, @@ -173519,7 +175216,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: nil, }, Value: float64(1), @@ -173540,7 +175237,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: Identifiers{ "j", }, @@ -173561,7 +175258,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: Identifiers{ "a", "acc", @@ -173582,7 +175279,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: Identifiers{ "acc", }, @@ -173604,7 +175301,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12574, + context: p16018, freeVariables: Identifiers{ "a", "i", @@ -173625,7 +175322,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12593, + context: p16037, freeVariables: Identifiers{ "a", "i", @@ -173645,7 +175342,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12593, + context: p16037, freeVariables: Identifiers{ "a", }, @@ -173666,7 +175363,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12593, + context: p16037, freeVariables: Identifiers{ "i", }, @@ -173699,7 +175396,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "a", "acc", @@ -173723,7 +175420,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12441, + context: p15885, freeVariables: Identifiers{ "aux", }, @@ -173746,7 +175443,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12605, + context: p16049, freeVariables: Identifiers{ "a", }, @@ -173767,7 +175464,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12605, + context: p16049, freeVariables: Identifiers{ "b", }, @@ -173788,7 +175485,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12605, + context: p16049, freeVariables: Identifiers{ "i", }, @@ -173809,7 +175506,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12605, + context: p16049, freeVariables: Identifiers{ "j", }, @@ -173828,7 +175525,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12605, + context: p16049, freeVariables: Identifiers{ "j", }, @@ -173850,7 +175547,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12605, + context: p16049, freeVariables: nil, }, Value: float64(1), @@ -173871,7 +175568,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12605, + context: p16049, freeVariables: Identifiers{ "acc", }, @@ -173906,7 +175603,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12433, + context: p15877, freeVariables: Identifiers{ "a", "aux", @@ -173927,7 +175624,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12433, + context: p15877, freeVariables: Identifiers{ "aux", }, @@ -173950,7 +175647,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12624, + context: p16068, freeVariables: Identifiers{ "a", }, @@ 
-173971,7 +175668,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12624, + context: p16068, freeVariables: Identifiers{ "b", }, @@ -173992,7 +175689,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12624, + context: p16068, freeVariables: nil, }, Value: float64(0), @@ -174012,7 +175709,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12624, + context: p16068, freeVariables: nil, }, Value: float64(0), @@ -174032,7 +175729,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12624, + context: p16068, freeVariables: nil, }, Elements: nil, @@ -174106,7 +175803,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16079, freeVariables: nil, }, }, @@ -174128,7 +175825,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16081, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -174370,7 +176067,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16105, freeVariables: Identifiers{ "base64_table", "i", @@ -174394,7 +176091,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16109, freeVariables: Identifiers{ "base64_table", "i", @@ -174414,7 +176111,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16112, freeVariables: Identifiers{ "base64_table", }, @@ -174435,7 +176132,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16115, freeVariables: Identifiers{ "i", }, @@ -174458,7 +176155,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p16118, freeVariables: Identifiers{ "i", }, @@ -174487,7 +176184,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16121, freeVariables: Identifiers{ "std", }, @@ -174506,7 +176203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16124, freeVariables: Identifiers{ "std", }, @@ -174525,7 +176222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16127, freeVariables: Identifiers{ "std", }, @@ -174571,7 +176268,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16132, freeVariables: nil, }, Value: float64(0), @@ -174591,14 +176288,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16134, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -174654,7 +176351,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -174681,7 +176378,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "patch", "std", @@ -174787,7 +176484,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "patch", "std", @@ -174807,7 +176504,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "std", }, @@ -174826,7 +176523,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "std", }, @@ -174872,7 +176569,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12656, + context: p16159, freeVariables: Identifiers{ "patch", }, @@ -174899,7 +176596,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, 
freeVariables: nil, }, Value: "object", @@ -174926,7 +176623,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "patch", "std", @@ -174950,7 +176647,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12663, + context: p16166, freeVariables: Identifiers{ "std", "target", @@ -175055,7 +176752,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12663, + context: p16166, freeVariables: Identifiers{ "std", "target", @@ -175075,7 +176772,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12663, + context: p16166, freeVariables: Identifiers{ "std", }, @@ -175094,7 +176791,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12663, + context: p16166, freeVariables: Identifiers{ "std", }, @@ -175140,7 +176837,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12682, + context: p16185, freeVariables: Identifiers{ "target", }, @@ -175167,7 +176864,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12663, + context: p16166, freeVariables: nil, }, Value: "object", @@ -175194,7 +176891,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12663, + context: p16166, freeVariables: Identifiers{ "target", }, @@ -175215,7 +176912,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12663, + context: p16166, freeVariables: nil, }, Asserts: nil, @@ -175239,7 +176936,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "patch", "std", @@ -175263,7 +176960,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: Identifiers{ "std", "target_object", @@ -175368,7 +177065,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: Identifiers{ "std", "target_object", @@ -175388,7 +177085,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: Identifiers{ "std", }, @@ -175407,7 +177104,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: Identifiers{ "std", }, @@ -175453,7 +177150,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12711, + context: p16214, freeVariables: Identifiers{ "target_object", }, @@ -175480,7 +177177,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: nil, }, Value: "object", @@ -175507,7 +177204,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: Identifiers{ "std", "target_object", @@ -175527,7 +177224,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: Identifiers{ "std", }, @@ -175546,7 +177243,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: Identifiers{ "std", }, @@ -175592,7 +177289,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12723, + context: p16226, freeVariables: Identifiers{ "target_object", }, @@ -175619,7 +177316,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12692, + context: p16195, freeVariables: nil, }, Elements: nil, @@ -175643,7 +177340,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "patch", "std", @@ -175887,7 +177584,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12751, + context: p16254, freeVariables: 
Identifiers{ "k", "patch", @@ -175907,7 +177604,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12751, + context: p16254, freeVariables: Identifiers{ "patch", }, @@ -175928,7 +177625,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12751, + context: p16254, freeVariables: Identifiers{ "k", }, @@ -175951,7 +177648,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12751, + context: p16254, freeVariables: nil, }, }, @@ -175995,7 +177692,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12762, + context: p16265, freeVariables: Identifiers{ "k", }, @@ -176041,7 +177738,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12751, + context: p16254, freeVariables: Identifiers{ "patch", "std", @@ -176061,7 +177758,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12751, + context: p16254, freeVariables: Identifiers{ "std", }, @@ -176080,7 +177777,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12751, + context: p16254, freeVariables: Identifiers{ "std", }, @@ -176126,7 +177823,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12774, + context: p16277, freeVariables: Identifiers{ "patch", }, @@ -176162,7 +177859,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "null_fields", "patch", @@ -176188,7 +177885,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12780, + context: p16283, freeVariables: Identifiers{ "patch", "std", @@ -176209,7 +177906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12780, + context: p16283, freeVariables: Identifiers{ "std", }, @@ -176228,7 +177925,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12780, + context: p16283, freeVariables: Identifiers{ "std", }, @@ -176274,7 +177971,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12789, + context: p16292, freeVariables: Identifiers{ "target_fields", }, @@ -176295,7 +177992,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12789, + context: p16292, freeVariables: Identifiers{ "patch", "std", @@ -176315,7 +178012,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12789, + context: p16292, freeVariables: Identifiers{ "std", }, @@ -176334,7 +178031,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12789, + context: p16292, freeVariables: Identifiers{ "std", }, @@ -176380,7 +178077,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12800, + context: p16303, freeVariables: Identifiers{ "patch", }, @@ -176643,7 +178340,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "k", "patch", @@ -176669,7 +178366,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "k", }, @@ -176690,7 +178387,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "patch", @@ -176712,7 +178409,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "patch", @@ -176734,7 +178431,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "patch", @@ -176755,7 +178452,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "std", }, @@ -176774,7 +178471,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "std", }, @@ -176820,7 +178517,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12843, + context: p16346, freeVariables: Identifiers{ "patch", }, @@ -176841,7 +178538,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12843, + context: p16346, freeVariables: Identifiers{ "k", }, @@ -176869,7 +178566,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "target_object", @@ -176889,7 +178586,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "target_object", }, @@ -176910,7 +178607,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", }, @@ -176933,7 +178630,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "patch", @@ -176955,7 +178652,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "std", @@ -176977,7 +178674,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "std", @@ -176998,7 +178695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "std", }, @@ -177017,7 +178714,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "std", }, @@ -177063,7 +178760,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12866, + context: p16369, freeVariables: Identifiers{ "target_object", }, @@ -177084,7 +178781,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12866, + context: p16369, freeVariables: Identifiers{ "k", }, @@ -177112,7 +178809,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "patch", @@ -177133,7 +178830,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "std", }, @@ -177152,7 +178849,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "std", }, @@ -177198,7 +178895,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12879, + context: p16382, freeVariables: nil, }, }, @@ -177216,7 +178913,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12879, + context: p16382, freeVariables: Identifiers{ "k", "patch", @@ -177236,7 +178933,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12879, + context: p16382, freeVariables: Identifiers{ "patch", }, @@ -177257,7 +178954,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12879, + context: p16382, freeVariables: Identifiers{ "k", }, @@ -177286,7 +178983,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "k", "patch", @@ -177308,7 +179005,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "std", }, @@ -177327,7 +179024,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12830, + context: p16333, freeVariables: Identifiers{ "std", }, @@ -177373,7 +179070,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12895, + context: p16398, freeVariables: 
Identifiers{ "k", "target_object", @@ -177393,7 +179090,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12895, + context: p16398, freeVariables: Identifiers{ "target_object", }, @@ -177414,7 +179111,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12895, + context: p16398, freeVariables: Identifiers{ "k", }, @@ -177437,7 +179134,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12895, + context: p16398, freeVariables: Identifiers{ "k", "patch", @@ -177457,7 +179154,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12895, + context: p16398, freeVariables: Identifiers{ "patch", }, @@ -177478,7 +179175,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12895, + context: p16398, freeVariables: Identifiers{ "k", }, @@ -177517,7 +179214,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "both_fields", "null_fields", @@ -177538,7 +179235,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "std", }, @@ -177557,7 +179254,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "std", }, @@ -177603,7 +179300,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12916, + context: p16419, freeVariables: Identifiers{ "both_fields", }, @@ -177624,7 +179321,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12916, + context: p16419, freeVariables: Identifiers{ "null_fields", }, @@ -177667,7 +179364,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12637, + context: p16140, freeVariables: Identifiers{ "patch", }, @@ -177736,7 +179433,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16429, freeVariables: nil, }, }, @@ -177758,7 +179455,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16431, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -178000,7 +179697,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16455, freeVariables: Identifiers{ "base64_table", "i", @@ -178024,7 +179721,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16459, freeVariables: Identifiers{ "base64_table", "i", @@ -178044,7 +179741,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16462, freeVariables: Identifiers{ "base64_table", }, @@ -178065,7 +179762,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16465, freeVariables: Identifiers{ "i", }, @@ -178088,7 +179785,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p16468, freeVariables: Identifiers{ "i", }, @@ -178117,7 +179814,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16471, freeVariables: Identifiers{ "std", }, @@ -178136,7 +179833,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16474, freeVariables: Identifiers{ "std", }, @@ -178155,7 +179852,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16477, freeVariables: Identifiers{ "std", }, @@ -178201,7 +179898,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16482, freeVariables: nil, }, Value: float64(0), @@ -178221,14 +179918,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16484, freeVariables: nil, }, Value: float64(63), 
OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -178284,7 +179981,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -178310,7 +180007,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12928, + context: p16490, freeVariables: Identifiers{ "o", "std", @@ -178330,7 +180027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12928, + context: p16490, freeVariables: Identifiers{ "std", }, @@ -178349,7 +180046,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12928, + context: p16490, freeVariables: Identifiers{ "std", }, @@ -178395,7 +180092,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12937, + context: p16499, freeVariables: Identifiers{ "o", }, @@ -178416,7 +180113,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12937, + context: p16499, freeVariables: nil, }, Value: false, @@ -178488,7 +180185,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16506, freeVariables: nil, }, }, @@ -178510,7 +180207,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16508, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -178752,7 +180449,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16532, freeVariables: Identifiers{ "base64_table", "i", @@ -178776,7 +180473,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16536, freeVariables: Identifiers{ "base64_table", "i", @@ -178796,7 +180493,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16539, freeVariables: Identifiers{ "base64_table", }, @@ -178817,7 +180514,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16542, freeVariables: Identifiers{ "i", }, @@ -178840,7 +180537,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p16545, freeVariables: Identifiers{ "i", }, @@ -178869,7 +180566,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16548, freeVariables: Identifiers{ "std", }, @@ -178888,7 +180585,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16551, freeVariables: Identifiers{ "std", }, @@ -178907,7 +180604,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16554, freeVariables: Identifiers{ "std", }, @@ -178953,7 +180650,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16559, freeVariables: nil, }, Value: float64(0), @@ -178973,14 +180670,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16561, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -179036,7 +180733,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -179062,7 +180759,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12946, + context: p16567, freeVariables: Identifiers{ "o", "std", @@ -179082,7 +180779,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12946, + context: p16567, freeVariables: Identifiers{ "std", }, @@ -179101,7 +180798,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12946, + context: p16567, freeVariables: Identifiers{ "std", }, @@ -179147,7 
+180844,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12955, + context: p16576, freeVariables: Identifiers{ "o", }, @@ -179168,7 +180865,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12955, + context: p16576, freeVariables: nil, }, Value: true, @@ -179240,7 +180937,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16583, freeVariables: nil, }, }, @@ -179262,7 +180959,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16585, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -179504,7 +181201,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16609, freeVariables: Identifiers{ "base64_table", "i", @@ -179528,7 +181225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16613, freeVariables: Identifiers{ "base64_table", "i", @@ -179548,7 +181245,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16616, freeVariables: Identifiers{ "base64_table", }, @@ -179569,7 +181266,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16619, freeVariables: Identifiers{ "i", }, @@ -179592,7 +181289,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p16622, freeVariables: Identifiers{ "i", }, @@ -179621,7 +181318,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16625, freeVariables: Identifiers{ "std", }, @@ -179640,7 +181337,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16628, freeVariables: Identifiers{ "std", }, @@ -179659,7 +181356,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16631, freeVariables: Identifiers{ "std", }, @@ -179705,7 +181402,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16636, freeVariables: nil, }, Value: float64(0), @@ -179725,14 +181422,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16638, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -179788,7 +181485,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -179815,7 +181512,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12964, + context: p16644, freeVariables: Identifiers{ "f", "o", @@ -179836,7 +181533,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12964, + context: p16644, freeVariables: Identifiers{ "std", }, @@ -179855,7 +181552,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12964, + context: p16644, freeVariables: Identifiers{ "std", }, @@ -179901,7 +181598,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12973, + context: p16653, freeVariables: Identifiers{ "o", }, @@ -179922,7 +181619,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12973, + context: p16653, freeVariables: Identifiers{ "f", }, @@ -179943,7 +181640,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12973, + context: p16653, freeVariables: nil, }, Value: false, @@ -180015,7 +181712,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16662, freeVariables: nil, }, }, @@ -180037,7 +181734,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16664, freeVariables: nil, }, Value: 
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -180279,7 +181976,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16688, freeVariables: Identifiers{ "base64_table", "i", @@ -180303,7 +182000,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16692, freeVariables: Identifiers{ "base64_table", "i", @@ -180323,7 +182020,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16695, freeVariables: Identifiers{ "base64_table", }, @@ -180344,7 +182041,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16698, freeVariables: Identifiers{ "i", }, @@ -180367,7 +182064,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p16701, freeVariables: Identifiers{ "i", }, @@ -180396,7 +182093,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16704, freeVariables: Identifiers{ "std", }, @@ -180415,7 +182112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16707, freeVariables: Identifiers{ "std", }, @@ -180434,7 +182131,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16710, freeVariables: Identifiers{ "std", }, @@ -180480,7 +182177,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16715, freeVariables: nil, }, Value: float64(0), @@ -180500,14 +182197,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16717, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -180563,7 +182260,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -180590,7 +182287,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12984, + context: p16723, freeVariables: Identifiers{ "f", "o", @@ -180611,7 +182308,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12984, + context: p16723, freeVariables: Identifiers{ "std", }, @@ -180630,7 +182327,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12984, + context: p16723, freeVariables: Identifiers{ "std", }, @@ -180676,7 +182373,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12993, + context: p16732, freeVariables: Identifiers{ "o", }, @@ -180697,7 +182394,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12993, + context: p16732, freeVariables: Identifiers{ "f", }, @@ -180718,7 +182415,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p12993, + context: p16732, freeVariables: nil, }, Value: true, @@ -180790,7 +182487,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16741, freeVariables: nil, }, }, @@ -180812,7 +182509,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16743, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -181054,7 +182751,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16767, freeVariables: Identifiers{ "base64_table", "i", @@ -181078,7 +182775,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16771, freeVariables: Identifiers{ "base64_table", "i", @@ -181098,7 +182795,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16774, freeVariables: Identifiers{ "base64_table", }, @@ -181119,7 +182816,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p9, + context: p16777, freeVariables: Identifiers{ "i", }, @@ -181142,7 +182839,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p16780, freeVariables: Identifiers{ "i", }, @@ -181171,7 +182868,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16783, freeVariables: Identifiers{ "std", }, @@ -181190,7 +182887,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16786, freeVariables: Identifiers{ "std", }, @@ -181209,7 +182906,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p16789, freeVariables: Identifiers{ "std", }, @@ -181255,7 +182952,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16794, freeVariables: nil, }, Value: float64(0), @@ -181275,14 +182972,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p16796, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -181338,7 +183035,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -181365,7 +183062,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -181389,7 +183086,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13008, + context: p16806, freeVariables: Identifiers{ "a", "std", @@ -181409,7 +183106,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13008, + context: p16806, freeVariables: Identifiers{ "std", }, @@ -181428,7 +183125,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13008, + context: p16806, freeVariables: Identifiers{ "std", }, @@ -181474,7 +183171,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13017, + context: p16815, freeVariables: Identifiers{ "a", }, @@ -181504,7 +183201,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -181529,7 +183226,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13023, + context: p16821, freeVariables: Identifiers{ "b", "std", @@ -181549,7 +183246,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13023, + context: p16821, freeVariables: Identifiers{ "std", }, @@ -181568,7 +183265,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13023, + context: p16821, freeVariables: Identifiers{ "std", }, @@ -181614,7 +183311,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13032, + context: p16830, freeVariables: Identifiers{ "b", }, @@ -181644,7 +183341,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -181667,7 +183364,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", "ta", @@ -181689,7 +183386,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", "ta", @@ -181710,7 +183407,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -181729,7 +183426,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -181775,7 +183472,7 @@ var StdAst = &DesugaredObject{ }, file: p1, 
}, - context: p13047, + context: p16845, freeVariables: Identifiers{ "ta", }, @@ -181796,7 +183493,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13047, + context: p16845, freeVariables: Identifiers{ "tb", }, @@ -181824,7 +183521,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: nil, }, Value: false, @@ -181843,7 +183540,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -181865,7 +183562,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", "ta", @@ -181885,7 +183582,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -181904,7 +183601,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -181950,7 +183647,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13063, + context: p16861, freeVariables: Identifiers{ "ta", }, @@ -181971,7 +183668,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13063, + context: p16861, freeVariables: nil, }, Value: "array", @@ -181998,7 +183695,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -182022,7 +183719,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13070, + context: p16868, freeVariables: Identifiers{ "a", "std", @@ -182042,7 +183739,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13070, + context: p16868, freeVariables: Identifiers{ "std", }, @@ -182061,7 +183758,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13070, + context: p16868, freeVariables: Identifiers{ "std", }, @@ -182107,7 +183804,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13079, + context: p16877, freeVariables: Identifiers{ "a", }, @@ -182137,7 +183834,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -182159,7 +183856,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "b", "la", @@ -182181,7 +183878,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "b", "la", @@ -182202,7 +183899,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -182221,7 +183918,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -182267,7 +183964,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13094, + context: p16892, freeVariables: Identifiers{ "la", }, @@ -182288,7 +183985,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13094, + context: p16892, freeVariables: Identifiers{ "b", "std", @@ -182308,7 +184005,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13094, + context: p16892, freeVariables: Identifiers{ "std", }, @@ -182327,7 +184024,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13094, + context: p16892, freeVariables: Identifiers{ "std", }, @@ -182373,7 +184070,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13105, + context: p16903, freeVariables: Identifiers{ "b", }, @@ -182407,7 +184104,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: nil, }, Value: false, @@ -182426,7 +184123,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -182451,7 +184148,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13112, + context: p16910, freeVariables: Identifiers{ "aux", "la", @@ -182481,7 +184178,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "a", "aux", @@ -182505,7 +184202,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "i", "la", @@ -182525,7 +184222,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "i", }, @@ -182547,7 +184244,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "la", }, @@ -182569,7 +184266,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: nil, }, Value: true, @@ -182588,7 +184285,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "a", "aux", @@ -182721,7 +184418,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "a", "i", @@ -182741,7 +184438,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "a", }, @@ -182762,7 +184459,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "i", }, @@ -182785,7 +184482,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "b", "i", @@ -182805,7 +184502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "b", }, @@ -182826,7 +184523,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "i", }, @@ -182856,7 +184553,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: nil, }, Value: false, @@ -182875,7 +184572,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "a", "aux", @@ -182897,7 +184594,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13116, + context: p16914, freeVariables: Identifiers{ "aux", }, @@ -182920,7 +184617,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13156, + context: p16954, freeVariables: Identifiers{ "a", }, @@ -182941,7 +184638,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13156, + context: p16954, freeVariables: Identifiers{ "b", }, @@ -182962,7 +184659,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13156, + context: p16954, freeVariables: Identifiers{ "i", }, @@ -182981,7 +184678,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13156, + context: p16954, freeVariables: Identifiers{ "i", }, @@ -183003,7 +184700,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13156, + context: p16954, freeVariables: nil, }, Value: float64(1), @@ -183036,7 +184733,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "aux", @@ -183057,7 +184754,7 
@@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "aux", }, @@ -183080,7 +184777,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13171, + context: p16969, freeVariables: Identifiers{ "a", }, @@ -183101,7 +184798,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13171, + context: p16969, freeVariables: Identifiers{ "b", }, @@ -183122,7 +184819,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13171, + context: p16969, freeVariables: nil, }, Value: float64(0), @@ -183151,7 +184848,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -183173,7 +184870,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", "ta", @@ -183193,7 +184890,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -183212,7 +184909,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -183258,7 +184955,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13187, + context: p16985, freeVariables: Identifiers{ "ta", }, @@ -183279,7 +184976,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13187, + context: p16985, freeVariables: nil, }, Value: "object", @@ -183306,7 +185003,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -183330,7 +185027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13194, + context: p16992, freeVariables: Identifiers{ "a", "std", @@ -183350,7 +185047,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13194, + context: p16992, freeVariables: Identifiers{ "std", }, @@ -183369,7 +185066,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13194, + context: p16992, freeVariables: Identifiers{ "std", }, @@ -183415,7 +185112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13203, + context: p17001, freeVariables: Identifiers{ "a", }, @@ -183445,7 +185142,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -183470,7 +185167,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13209, + context: p17007, freeVariables: Identifiers{ "fields", "std", @@ -183490,7 +185187,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13209, + context: p17007, freeVariables: Identifiers{ "std", }, @@ -183509,7 +185206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13209, + context: p17007, freeVariables: Identifiers{ "std", }, @@ -183555,7 +185252,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13218, + context: p17016, freeVariables: Identifiers{ "fields", }, @@ -183585,7 +185282,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -183716,7 +185413,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "fields", }, @@ -183737,7 +185434,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "b", "std", @@ -183757,7 +185454,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: 
Identifiers{ "std", }, @@ -183776,7 +185473,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -183822,7 +185519,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13243, + context: p17041, freeVariables: Identifiers{ "b", }, @@ -183856,7 +185553,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: nil, }, Value: false, @@ -183875,7 +185572,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -183901,7 +185598,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13250, + context: p17048, freeVariables: Identifiers{ "aux", "fields", @@ -183932,7 +185629,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "a", "aux", @@ -183957,7 +185654,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "i", "lfields", @@ -183977,7 +185674,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "i", }, @@ -183999,7 +185696,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "lfields", }, @@ -184021,7 +185718,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: nil, }, Value: true, @@ -184040,7 +185737,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "a", "aux", @@ -184064,7 +185761,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "a", "b", @@ -184090,7 +185787,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13269, + context: p17067, freeVariables: Identifiers{ "fields", "i", @@ -184110,7 +185807,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13269, + context: p17067, freeVariables: Identifiers{ "fields", }, @@ -184131,7 +185828,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13269, + context: p17067, freeVariables: Identifiers{ "i", }, @@ -184267,7 +185964,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "a", "f", @@ -184287,7 +185984,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "a", }, @@ -184308,7 +186005,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "f", }, @@ -184331,7 +186028,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "b", "f", @@ -184351,7 +186048,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "b", }, @@ -184372,7 +186069,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "f", }, @@ -184403,7 +186100,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: nil, }, Value: false, @@ -184422,7 +186119,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, + context: p17052, freeVariables: Identifiers{ "a", "aux", @@ -184444,7 +186141,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13254, 
+ context: p17052, freeVariables: Identifiers{ "aux", }, @@ -184467,7 +186164,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13304, + context: p17102, freeVariables: Identifiers{ "a", }, @@ -184488,7 +186185,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13304, + context: p17102, freeVariables: Identifiers{ "b", }, @@ -184509,7 +186206,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13304, + context: p17102, freeVariables: Identifiers{ "i", }, @@ -184528,7 +186225,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13304, + context: p17102, freeVariables: Identifiers{ "i", }, @@ -184550,7 +186247,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13304, + context: p17102, freeVariables: nil, }, Value: float64(1), @@ -184583,7 +186280,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "aux", @@ -184604,7 +186301,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "aux", }, @@ -184627,7 +186324,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13319, + context: p17117, freeVariables: Identifiers{ "a", }, @@ -184648,7 +186345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13319, + context: p17117, freeVariables: Identifiers{ "b", }, @@ -184669,7 +186366,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13319, + context: p17117, freeVariables: nil, }, Value: float64(0), @@ -184699,7 +186396,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "a", "b", @@ -184720,7 +186417,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -184739,7 +186436,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13004, + context: p16802, freeVariables: Identifiers{ "std", }, @@ -184785,7 +186482,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13333, + context: p17131, freeVariables: Identifiers{ "a", }, @@ -184806,7 +186503,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13333, + context: p17131, freeVariables: Identifiers{ "b", }, @@ -184885,7 +186582,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17139, freeVariables: nil, }, }, @@ -184907,7 +186604,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17141, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -185149,7 +186846,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17165, freeVariables: Identifiers{ "base64_table", "i", @@ -185173,7 +186870,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17169, freeVariables: Identifiers{ "base64_table", "i", @@ -185193,7 +186890,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17172, freeVariables: Identifiers{ "base64_table", }, @@ -185214,7 +186911,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17175, freeVariables: Identifiers{ "i", }, @@ -185237,7 +186934,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p17178, freeVariables: Identifiers{ "i", }, @@ -185266,7 +186963,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17181, freeVariables: Identifiers{ "std", }, @@ -185285,7 +186982,7 @@ var 
StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17184, freeVariables: Identifiers{ "std", }, @@ -185304,7 +187001,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17187, freeVariables: Identifiers{ "std", }, @@ -185350,7 +187047,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p17192, freeVariables: nil, }, Value: float64(0), @@ -185370,14 +187067,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p17194, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -185433,7 +187130,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "std", }, @@ -185460,7 +187157,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13343, + context: p17200, freeVariables: Identifiers{ "f", "r", @@ -185484,7 +187181,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13347, + context: p17204, freeVariables: Identifiers{ "f", "std", @@ -185504,7 +187201,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13347, + context: p17204, freeVariables: Identifiers{ "std", }, @@ -185523,7 +187220,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13347, + context: p17204, freeVariables: Identifiers{ "std", }, @@ -185569,7 +187266,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13356, + context: p17213, freeVariables: Identifiers{ "f", }, @@ -185590,7 +187287,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13356, + context: p17213, freeVariables: nil, }, Value: "/", @@ -185620,7 +187317,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13343, + context: p17200, freeVariables: Identifiers{ "arr", "r", @@ -185641,7 +187338,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13343, + context: p17200, freeVariables: Identifiers{ "std", }, @@ -185660,7 +187357,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13343, + context: p17200, freeVariables: Identifiers{ "std", }, @@ -185706,7 +187403,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13368, + context: p17225, freeVariables: nil, }, Value: "/", @@ -185727,7 +187424,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13368, + context: p17225, freeVariables: Identifiers{ "arr", "r", @@ -185748,7 +187445,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13368, + context: p17225, freeVariables: Identifiers{ "arr", "std", @@ -185768,7 +187465,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13368, + context: p17225, freeVariables: Identifiers{ "std", }, @@ -185787,7 +187484,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13368, + context: p17225, freeVariables: Identifiers{ "std", }, @@ -185833,7 +187530,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13380, + context: p17237, freeVariables: Identifiers{ "arr", "std", @@ -185853,7 +187550,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13380, + context: p17237, freeVariables: Identifiers{ "arr", "std", @@ -185873,7 +187570,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13380, + context: p17237, freeVariables: Identifiers{ "std", }, @@ -185892,7 +187589,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13380, + context: p17237, freeVariables: Identifiers{ "std", }, @@ -185938,7 +187635,7 @@ var StdAst = 
&DesugaredObject{ }, file: p1, }, - context: p13391, + context: p17248, freeVariables: Identifiers{ "arr", }, @@ -185966,7 +187663,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13380, + context: p17237, freeVariables: nil, }, Value: float64(1), @@ -185987,7 +187684,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13380, + context: p17237, freeVariables: Identifiers{ "arr", }, @@ -186013,7 +187710,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13398, + context: p17255, freeVariables: Identifiers{ "arr", "i", @@ -186033,7 +187730,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13398, + context: p17255, freeVariables: Identifiers{ "arr", }, @@ -186054,7 +187751,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13398, + context: p17255, freeVariables: Identifiers{ "i", }, @@ -186085,7 +187782,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13368, + context: p17225, freeVariables: Identifiers{ "r", }, @@ -186105,7 +187802,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13408, + context: p17265, freeVariables: Identifiers{ "r", }, @@ -186184,7 +187881,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17271, freeVariables: nil, }, }, @@ -186206,7 +187903,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17273, freeVariables: nil, }, Value: "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/", @@ -186448,7 +188145,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17297, freeVariables: Identifiers{ "base64_table", "i", @@ -186472,7 +188169,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17301, freeVariables: Identifiers{ "base64_table", "i", @@ -186492,7 +188189,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17304, freeVariables: Identifiers{ "base64_table", }, @@ -186513,7 +188210,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17307, freeVariables: Identifiers{ "i", }, @@ -186536,7 +188233,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p43, + context: p17310, freeVariables: Identifiers{ "i", }, @@ -186565,7 +188262,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17313, freeVariables: Identifiers{ "std", }, @@ -186584,7 +188281,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17316, freeVariables: Identifiers{ "std", }, @@ -186603,7 +188300,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p9, + context: p17319, freeVariables: Identifiers{ "std", }, @@ -186649,7 +188346,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p17324, freeVariables: nil, }, Value: float64(0), @@ -186669,14 +188366,14 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p54, + context: p17326, freeVariables: nil, }, Value: float64(63), OriginalString: "63", }, }, - Named: nil, + Named: []NamedArgument{}, }, TrailingComma: false, TailStrict: false, @@ -186732,7 +188429,7 @@ var StdAst = &DesugaredObject{ }, file: nil, }, - context: p9, + context: p68, freeVariables: Identifiers{ "$", "std", @@ -186759,7 +188456,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "$", "a", @@ -186783,7 +188480,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13420, + context: p17336, freeVariables: Identifiers{ "std", }, @@ -186809,7 
+188506,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", "std", @@ -186832,7 +188529,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13428, + context: p17344, freeVariables: Identifiers{ "b", "std", @@ -186852,7 +188549,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13428, + context: p17344, freeVariables: Identifiers{ "std", }, @@ -186871,7 +188568,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13428, + context: p17344, freeVariables: Identifiers{ "std", }, @@ -186917,7 +188614,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13437, + context: p17353, freeVariables: Identifiers{ "b", }, @@ -186947,7 +188644,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", "std", @@ -187053,7 +188750,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", }, @@ -187074,7 +188771,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: nil, }, }, @@ -187098,7 +188795,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: nil, }, Value: false, @@ -187117,7 +188814,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", "std", @@ -187223,7 +188920,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "t", }, @@ -187244,7 +188941,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: nil, }, Value: "array", @@ -187271,7 +188968,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", "std", @@ -187291,7 +188988,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", "std", @@ -187311,7 +189008,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "std", }, @@ -187330,7 +189027,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "std", }, @@ -187376,7 +189073,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13477, + context: p17393, freeVariables: Identifiers{ "b", }, @@ -187404,7 +189101,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: nil, }, Value: float64(0), @@ -187425,7 +189122,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", "std", @@ -187531,7 +189228,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "t", }, @@ -187552,7 +189249,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: nil, }, Value: "object", @@ -187579,7 +189276,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", "std", @@ -187599,7 +189296,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "b", "std", @@ -187619,7 +189316,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ 
"std", }, @@ -187638,7 +189335,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: Identifiers{ "std", }, @@ -187684,7 +189381,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13504, + context: p17420, freeVariables: Identifiers{ "b", }, @@ -187712,7 +189409,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: nil, }, Value: float64(0), @@ -187733,7 +189430,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13424, + context: p17340, freeVariables: nil, }, Value: true, @@ -187760,7 +189457,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "$", "a", @@ -187785,7 +189482,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13512, + context: p17428, freeVariables: Identifiers{ "a", "std", @@ -187805,7 +189502,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13512, + context: p17428, freeVariables: Identifiers{ "std", }, @@ -187824,7 +189521,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13512, + context: p17428, freeVariables: Identifiers{ "std", }, @@ -187870,7 +189567,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13521, + context: p17437, freeVariables: Identifiers{ "a", }, @@ -187900,7 +189597,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "$", "a", @@ -188008,7 +189705,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "t", }, @@ -188029,7 +189726,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: nil, }, Value: "array", @@ -188193,7 +189890,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "$", "isContent", @@ -188214,7 +189911,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "isContent", }, @@ -188237,7 +189934,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13555, + context: p17471, freeVariables: Identifiers{ "$", "x", @@ -188257,7 +189954,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13555, + context: p17471, freeVariables: Identifiers{ "$", }, @@ -188276,7 +189973,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13555, + context: p17471, freeVariables: Identifiers{ "$", }, @@ -188322,7 +190019,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13564, + context: p17480, freeVariables: Identifiers{ "x", }, @@ -188376,7 +190073,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13570, + context: p17486, freeVariables: Identifiers{ "std", "x", @@ -188396,7 +190093,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13570, + context: p17486, freeVariables: Identifiers{ "std", }, @@ -188415,7 +190112,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13570, + context: p17486, freeVariables: Identifiers{ "std", }, @@ -188461,7 +190158,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13579, + context: p17495, freeVariables: Identifiers{ "x", }, @@ -188513,7 +190210,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "a", }, @@ -188540,7 +190237,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, 
freeVariables: Identifiers{ "$", "a", @@ -188648,7 +190345,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "t", }, @@ -188669,7 +190366,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: nil, }, Value: "object", @@ -188922,7 +190619,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "a", "isContent", @@ -188944,7 +190641,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "isContent", }, @@ -188967,7 +190664,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13624, + context: p17540, freeVariables: Identifiers{ "a", "std", @@ -188988,7 +190685,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13624, + context: p17540, freeVariables: Identifiers{ "std", }, @@ -189007,7 +190704,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13624, + context: p17540, freeVariables: Identifiers{ "std", }, @@ -189053,7 +190750,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13633, + context: p17549, freeVariables: Identifiers{ "a", "x", @@ -189073,7 +190770,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13633, + context: p17549, freeVariables: Identifiers{ "a", }, @@ -189094,7 +190791,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13633, + context: p17549, freeVariables: Identifiers{ "x", }, @@ -189151,7 +190848,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "$", "a", @@ -189176,7 +190873,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "x", }, @@ -189197,7 +190894,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13648, + context: p17564, freeVariables: Identifiers{ "$", "a", @@ -189218,7 +190915,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13648, + context: p17564, freeVariables: Identifiers{ "$", }, @@ -189237,7 +190934,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13648, + context: p17564, freeVariables: Identifiers{ "$", }, @@ -189283,7 +190980,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13657, + context: p17573, freeVariables: Identifiers{ "a", "x", @@ -189303,7 +191000,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13657, + context: p17573, freeVariables: Identifiers{ "a", }, @@ -189324,7 +191021,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13657, + context: p17573, freeVariables: Identifiers{ "x", }, @@ -189382,7 +191079,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "a", "std", @@ -189402,7 +191099,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "std", }, @@ -189421,7 +191118,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "std", }, @@ -189467,7 +191164,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13673, + context: p17589, freeVariables: Identifiers{ "a", }, @@ -189506,7 +191203,7 @@ var StdAst = &DesugaredObject{ }, file: p1, }, - context: p13416, + context: p17332, freeVariables: Identifiers{ "a", }, diff --git a/vendor/github.com/google/go-jsonnet/vm.go 
b/vendor/github.com/google/go-jsonnet/vm.go index 2bfe0df4c2f..4a254fb3e31 100644 --- a/vendor/github.com/google/go-jsonnet/vm.go +++ b/vendor/github.com/google/go-jsonnet/vm.go @@ -191,5 +191,5 @@ func SnippetToAST(filename string, snippet string) (ast.Node, error) { // Version returns the Jsonnet version number. func Version() string { - return "v0.9.5" + return "v0.10.0" } diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go index 0e32451a320..5351f36f36c 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -7106,20 +7106,20 @@ func (m *Any) ToRawInfo() interface{} { func (m *ApiKeySecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7130,14 +7130,14 @@ func (m *ApiKeySecurity) ToRawInfo() interface{} { func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7148,24 +7148,24 @@ func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { func (m *BodyParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) } // &{Name:schema Type:Schema StringEnumValues:[] MapType: 
Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7176,17 +7176,17 @@ func (m *BodyParameter) ToRawInfo() interface{} { func (m *Contact) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.Email != "" { - info = append(info, yaml.MapItem{"email", m.Email}) + info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7198,7 +7198,7 @@ func (m *Default) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} @@ -7210,7 +7210,7 @@ func (m *Definitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} @@ -7221,41 +7221,41 @@ func (m *Definitions) ToRawInfo() interface{} { func (m *Document) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Swagger != "" { - info = append(info, yaml.MapItem{"swagger", m.Swagger}) + info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) } if m.Info != nil { - info = append(info, yaml.MapItem{"info", m.Info.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) } // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Host != "" { - info = append(info, yaml.MapItem{"host", m.Host}) + info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) } if m.BasePath != "" { - info = append(info, yaml.MapItem{"basePath", m.BasePath}) + info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath}) } if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", m.Schemes}) + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) + info = append(info, yaml.MapItem{Key: "produces", Value: 
m.Produces}) } if m.Paths != nil { - info = append(info, yaml.MapItem{"paths", m.Paths.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) } // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Definitions != nil { - info = append(info, yaml.MapItem{"definitions", m.Definitions.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) } // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Parameters != nil { - info = append(info, yaml.MapItem{"parameters", m.Parameters.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) } // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) } // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Security) != 0 { @@ -7263,11 +7263,11 @@ func (m *Document) ToRawInfo() interface{} { for _, item := range m.Security { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"security", items}) + info = append(info, yaml.MapItem{Key: "security", Value: items}) } // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.SecurityDefinitions != nil { - info = append(info, yaml.MapItem{"securityDefinitions", m.SecurityDefinitions.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) } // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Tags) != 0 { @@ -7275,16 +7275,16 @@ func (m *Document) ToRawInfo() interface{} { for _, item := range m.Tags { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"tags", items}) + info = append(info, yaml.MapItem{Key: "tags", Value: items}) } // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7296,7 +7296,7 @@ func (m *Examples) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} 
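Note on the mechanical change running through these gnostic ToRawInfo() hunks: every yaml.MapItem composite literal is rewritten from positional fields ({"type", m.Type}) to keyed fields ({Key: "type", Value: m.Type}). The short sketch below is not part of the diff; it assumes gopkg.in/yaml.v2's ordered MapSlice/MapItem types, and it shows that the keyed form builds the same value while also satisfying go vet's composites check for struct literals defined in another package, which is presumably why the vendored file was regenerated this way.

// Sketch only (not from the diff): keyed yaml.MapItem literals, assuming gopkg.in/yaml.v2.
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

func main() {
	info := yaml.MapSlice{}
	// Removed lines used the unkeyed form: yaml.MapItem{"type", "apiKey"}.
	// Added lines use keyed fields, which go vet accepts and which remain
	// correct even if the struct's field order ever changes:
	info = append(info, yaml.MapItem{Key: "type", Value: "apiKey"})
	info = append(info, yaml.MapItem{Key: "name", Value: "api_key"})

	out, err := yaml.Marshal(info) // a MapSlice marshals as an ordered mapping
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // prints "type: apiKey" then "name: api_key"
}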
@@ -7307,14 +7307,14 @@ func (m *Examples) ToRawInfo() interface{} { func (m *ExternalDocs) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7325,38 +7325,38 @@ func (m *ExternalDocs) ToRawInfo() interface{} { func (m *FileSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) } // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7367,81 +7367,81 @@ func (m *FileSchema) ToRawInfo() interface{} { func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if 
m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if 
m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7452,69 +7452,69 @@ func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { func (m *Header) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) 
+ info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7525,78 +7525,78 @@ func (m *Header) ToRawInfo() interface{} { func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = 
append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7608,7 +7608,7 @@ func (m *Headers) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} @@ -7619,28 +7619,28 @@ func (m *Headers) ToRawInfo() interface{} { func (m *Info) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Version != "" { - info = append(info, yaml.MapItem{"version", m.Version}) + info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.TermsOfService != "" { - info = append(info, yaml.MapItem{"termsOfService", m.TermsOfService}) + info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService}) } if m.Contact != nil { - info = append(info, yaml.MapItem{"contact", m.Contact.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()}) } // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.License != nil { - info = append(info, yaml.MapItem{"license", m.License.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()}) } // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, 
yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7655,7 +7655,7 @@ func (m *ItemsItem) ToRawInfo() interface{} { for _, item := range m.Schema { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"schema", items}) + info = append(info, yaml.MapItem{Key: "schema", Value: items}) } // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} return info @@ -7665,10 +7665,10 @@ func (m *ItemsItem) ToRawInfo() interface{} { func (m *JsonReference) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } return info } @@ -7677,14 +7677,14 @@ func (m *JsonReference) ToRawInfo() interface{} { func (m *License) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Url != "" { - info = append(info, yaml.MapItem{"url", m.Url}) + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7695,7 +7695,7 @@ func (m *License) ToRawInfo() interface{} { func (m *NamedAny) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7705,7 +7705,7 @@ func (m *NamedAny) ToRawInfo() interface{} { func (m *NamedHeader) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7715,7 +7715,7 @@ func (m *NamedHeader) ToRawInfo() interface{} { func (m *NamedParameter) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7725,7 +7725,7 @@ func (m *NamedParameter) ToRawInfo() interface{} { func (m *NamedPathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7735,7 +7735,7 @@ func (m 
*NamedPathItem) ToRawInfo() interface{} { func (m *NamedResponse) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7745,7 +7745,7 @@ func (m *NamedResponse) ToRawInfo() interface{} { func (m *NamedResponseValue) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7755,7 +7755,7 @@ func (m *NamedResponseValue) ToRawInfo() interface{} { func (m *NamedSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7765,7 +7765,7 @@ func (m *NamedSchema) ToRawInfo() interface{} { func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7775,10 +7775,10 @@ func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { func (m *NamedString) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Value != "" { - info = append(info, yaml.MapItem{"value", m.Value}) + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) } return info } @@ -7787,7 +7787,7 @@ func (m *NamedString) ToRawInfo() interface{} { func (m *NamedStringArray) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} return info @@ -7824,27 +7824,27 @@ func (m *NonBodyParameter) ToRawInfo() interface{} { func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) } if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) 
+ info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7855,24 +7855,24 @@ func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7883,24 +7883,24 @@ func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.AuthorizationUrl != "" { - info = append(info, yaml.MapItem{"authorizationUrl", m.AuthorizationUrl}) + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7911,24 +7911,24 @@ func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { func (m 
*Oauth2PasswordSecurity) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Flow != "" { - info = append(info, yaml.MapItem{"flow", m.Flow}) + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) } if m.Scopes != nil { - info = append(info, yaml.MapItem{"scopes", m.Scopes.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) } // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.TokenUrl != "" { - info = append(info, yaml.MapItem{"tokenUrl", m.TokenUrl}) + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -7946,56 +7946,56 @@ func (m *Oauth2Scopes) ToRawInfo() interface{} { func (m *Operation) ToRawInfo() interface{} { info := yaml.MapSlice{} if len(m.Tags) != 0 { - info = append(info, yaml.MapItem{"tags", m.Tags}) + info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) } if m.Summary != "" { - info = append(info, yaml.MapItem{"summary", m.Summary}) + info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.OperationId != "" { - info = append(info, yaml.MapItem{"operationId", m.OperationId}) + info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) } if len(m.Produces) != 0 { - info = append(info, yaml.MapItem{"produces", m.Produces}) + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) } if len(m.Consumes) != 0 { - info = append(info, yaml.MapItem{"consumes", m.Consumes}) + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) } if len(m.Parameters) != 0 { items := make([]interface{}, 0) for _, item := range m.Parameters { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"parameters", items}) + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) } // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} if m.Responses != nil { - info = append(info, yaml.MapItem{"responses", m.Responses.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) } // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Schemes) != 0 { - info = append(info, yaml.MapItem{"schemes", 
m.Schemes}) + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) } if m.Deprecated != false { - info = append(info, yaml.MapItem{"deprecated", m.Deprecated}) + info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) } if len(m.Security) != 0 { items := make([]interface{}, 0) for _, item := range m.Security { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"security", items}) + info = append(info, yaml.MapItem{Key: "security", Value: items}) } // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8024,7 +8024,7 @@ func (m *ParameterDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} @@ -8052,34 +8052,34 @@ func (m *ParametersItem) ToRawInfo() interface{} { func (m *PathItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Get != nil { - info = append(info, yaml.MapItem{"get", m.Get.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) } // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Put != nil { - info = append(info, yaml.MapItem{"put", m.Put.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) } // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Post != nil { - info = append(info, yaml.MapItem{"post", m.Post.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) } // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Delete != nil { - info = append(info, yaml.MapItem{"delete", m.Delete.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) } // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Options != nil { - info = append(info, yaml.MapItem{"options", m.Options.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()}) } // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Head != nil { - info = append(info, yaml.MapItem{"head", m.Head.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) } // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Patch != nil { - info = append(info, yaml.MapItem{"patch", m.Patch.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "patch", Value: 
m.Patch.ToRawInfo()}) } // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.Parameters) != 0 { @@ -8087,12 +8087,12 @@ func (m *PathItem) ToRawInfo() interface{} { for _, item := range m.Parameters { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"parameters", items}) + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) } // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8103,78 +8103,78 @@ func (m *PathItem) ToRawInfo() interface{} { func (m *PathParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = 
append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8186,13 +8186,13 @@ func (m *Paths) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} if m.Path != nil { for _, item := range m.Path { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} @@ -8203,66 +8203,66 @@ func (m *Paths) ToRawInfo() interface{} { func (m *PrimitivesItems) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", 
Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8274,7 +8274,7 @@ func (m *Properties) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} @@ -8285,81 +8285,81 @@ func (m *Properties) ToRawInfo() interface{} { func (m *QueryParameterSubSchema) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Required != false { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if m.In != "" { - info = append(info, yaml.MapItem{"in", m.In}) + info = append(info, yaml.MapItem{Key: 
"in", Value: m.In}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.AllowEmptyValue != false { - info = append(info, yaml.MapItem{"allowEmptyValue", m.AllowEmptyValue}) + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) } if m.Type != "" { - info = append(info, yaml.MapItem{"type", m.Type}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Items != nil { - info = append(info, yaml.MapItem{"items", m.Items.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) } // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.CollectionFormat != "" { - info = append(info, yaml.MapItem{"collectionFormat", m.CollectionFormat}) + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: 
Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8370,23 +8370,23 @@ func (m *QueryParameterSubSchema) ToRawInfo() interface{} { func (m *Response) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Schema != nil { - info = append(info, yaml.MapItem{"schema", m.Schema.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) } // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Headers != nil { - info = append(info, yaml.MapItem{"headers", m.Headers.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) } // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Examples != nil { - info = append(info, yaml.MapItem{"examples", m.Examples.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()}) } // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8398,7 +8398,7 @@ func (m *ResponseDefinitions) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} @@ -8427,13 +8427,13 @@ func (m *Responses) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.ResponseCode != nil { for _, item := range m.ResponseCode { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8444,80 +8444,80 @@ func (m *Responses) ToRawInfo() interface{} { func (m *Schema) ToRawInfo() interface{} { info := yaml.MapSlice{} 
if m.XRef != "" { - info = append(info, yaml.MapItem{"$ref", m.XRef}) + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) } if m.Format != "" { - info = append(info, yaml.MapItem{"format", m.Format}) + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) } if m.Title != "" { - info = append(info, yaml.MapItem{"title", m.Title}) + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.Default != nil { - info = append(info, yaml.MapItem{"default", m.Default.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) } // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.MultipleOf != 0.0 { - info = append(info, yaml.MapItem{"multipleOf", m.MultipleOf}) + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) } if m.Maximum != 0.0 { - info = append(info, yaml.MapItem{"maximum", m.Maximum}) + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) } if m.ExclusiveMaximum != false { - info = append(info, yaml.MapItem{"exclusiveMaximum", m.ExclusiveMaximum}) + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) } if m.Minimum != 0.0 { - info = append(info, yaml.MapItem{"minimum", m.Minimum}) + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) } if m.ExclusiveMinimum != false { - info = append(info, yaml.MapItem{"exclusiveMinimum", m.ExclusiveMinimum}) + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) } if m.MaxLength != 0 { - info = append(info, yaml.MapItem{"maxLength", m.MaxLength}) + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) } if m.MinLength != 0 { - info = append(info, yaml.MapItem{"minLength", m.MinLength}) + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) } if m.Pattern != "" { - info = append(info, yaml.MapItem{"pattern", m.Pattern}) + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) } if m.MaxItems != 0 { - info = append(info, yaml.MapItem{"maxItems", m.MaxItems}) + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) } if m.MinItems != 0 { - info = append(info, yaml.MapItem{"minItems", m.MinItems}) + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) } if m.UniqueItems != false { - info = append(info, yaml.MapItem{"uniqueItems", m.UniqueItems}) + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) } if m.MaxProperties != 0 { - info = append(info, yaml.MapItem{"maxProperties", m.MaxProperties}) + info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties}) } if m.MinProperties != 0 { - info = append(info, yaml.MapItem{"minProperties", m.MinProperties}) + info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties}) } if len(m.Required) != 0 { - info = append(info, yaml.MapItem{"required", m.Required}) + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) } if len(m.Enum) != 0 { items := make([]interface{}, 0) for _, item := range m.Enum { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"enum", items}) + info = append(info, yaml.MapItem{Key: "enum", Value: items}) } // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: 
Implicit:false Description:} if m.AdditionalProperties != nil { - info = append(info, yaml.MapItem{"additionalProperties", m.AdditionalProperties.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()}) } // &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Type != nil { if len(m.Type.Value) == 1 { - info = append(info, yaml.MapItem{"type", m.Type.Value[0]}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]}) } else { - info = append(info, yaml.MapItem{"type", m.Type.Value}) + info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value}) } } // &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} @@ -8526,7 +8526,7 @@ func (m *Schema) ToRawInfo() interface{} { for _, item := range m.Items.Schema { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"items", items[0]}) + info = append(info, yaml.MapItem{Key: "items", Value: items[0]}) } // &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if len(m.AllOf) != 0 { @@ -8534,34 +8534,34 @@ func (m *Schema) ToRawInfo() interface{} { for _, item := range m.AllOf { items = append(items, item.ToRawInfo()) } - info = append(info, yaml.MapItem{"allOf", items}) + info = append(info, yaml.MapItem{Key: "allOf", Value: items}) } // &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} if m.Properties != nil { - info = append(info, yaml.MapItem{"properties", m.Properties.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()}) } // &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Discriminator != "" { - info = append(info, yaml.MapItem{"discriminator", m.Discriminator}) + info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator}) } if m.ReadOnly != false { - info = append(info, yaml.MapItem{"readOnly", m.ReadOnly}) + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) } if m.Xml != nil { - info = append(info, yaml.MapItem{"xml", m.Xml.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()}) } // &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.Example != nil { - info = append(info, yaml.MapItem{"example", m.Example.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) } // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8590,7 +8590,7 @@ func (m *SecurityDefinitions) 
ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:} @@ -8639,7 +8639,7 @@ func (m *SecurityRequirement) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} @@ -8655,18 +8655,18 @@ func (m *StringArray) ToRawInfo() interface{} { func (m *Tag) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Description != "" { - info = append(info, yaml.MapItem{"description", m.Description}) + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) } if m.ExternalDocs != nil { - info = append(info, yaml.MapItem{"externalDocs", m.ExternalDocs.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) } // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} @@ -8677,7 +8677,7 @@ func (m *Tag) ToRawInfo() interface{} { func (m *TypeItem) ToRawInfo() interface{} { info := yaml.MapSlice{} if len(m.Value) != 0 { - info = append(info, yaml.MapItem{"value", m.Value}) + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) } return info } @@ -8687,7 +8687,7 @@ func (m *VendorExtension) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.AdditionalProperties != nil { for _, item := range m.AdditionalProperties { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} @@ -8698,23 +8698,23 @@ func (m *VendorExtension) ToRawInfo() interface{} { func (m *Xml) ToRawInfo() interface{} { info := yaml.MapSlice{} if m.Name != "" { - info = append(info, yaml.MapItem{"name", m.Name}) + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) } if m.Namespace != "" { - info = append(info, yaml.MapItem{"namespace", m.Namespace}) + info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) } if m.Prefix != "" { - info = append(info, yaml.MapItem{"prefix", m.Prefix}) + info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) } if m.Attribute != false { - info = append(info, yaml.MapItem{"attribute", m.Attribute}) + info = append(info, yaml.MapItem{Key: "attribute", Value: 
m.Attribute}) } if m.Wrapped != false { - info = append(info, yaml.MapItem{"wrapped", m.Wrapped}) + info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) } if m.VendorExtension != nil { for _, item := range m.VendorExtension { - info = append(info, yaml.MapItem{item.Name, item.Value.ToRawInfo()}) + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) } } // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go index 37da7df256f..a030fa67653 100644 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: OpenAPIv2/OpenAPIv2.proto -// DO NOT EDIT! /* Package openapi_v2 is a generated protocol buffer package. @@ -4257,7 +4256,7 @@ func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ // 3129 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go index 2d4b3303db4..c954a2d9b24 100644 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -110,7 +110,9 @@ func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { if err != nil { return nil, err } - infoCache[filename] = info + if len(filename) > 0 { + infoCache[filename] = info + } return info, nil } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go index 7c6b9149673..749ff784166 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-go. +// Code generated by protoc-gen-go. DO NOT EDIT. // source: extension.proto -// DO NOT EDIT! /* Package openapiextension_v1 is a generated protocol buffer package. @@ -78,7 +77,7 @@ func (m *Version) GetSuffix() string { // An encoded Request is written to the ExtensionHandler's stdin. type ExtensionHandlerRequest struct { // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to openapic. + // The specifications will appear in the order they are specified to gnostic. Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` // The version number of openapi compiler. 
CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` @@ -192,28 +191,28 @@ func init() { func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 355 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xf3, 0x40, - 0x1c, 0xc4, 0x49, 0xdf, 0xf2, 0x64, 0x1f, 0xb4, 0xb2, 0x16, 0x8d, 0xe2, 0xa1, 0x04, 0x84, 0x22, - 0xb8, 0xa5, 0x0a, 0xde, 0x5b, 0x28, 0xea, 0xc5, 0x96, 0x3d, 0xd4, 0x9b, 0x65, 0x9b, 0xfe, 0xdb, - 0x46, 0x92, 0xdd, 0x75, 0xf3, 0x62, 0xfb, 0x55, 0x3c, 0xfa, 0x49, 0x25, 0xbb, 0xd9, 0x7a, 0x50, - 0x6f, 0x99, 0x1f, 0x93, 0xfc, 0x67, 0x26, 0xa8, 0x0d, 0xdb, 0x0c, 0x78, 0x1a, 0x09, 0x4e, 0xa4, - 0x12, 0x99, 0xc0, 0xc7, 0x42, 0x02, 0x67, 0x32, 0xfa, 0xe6, 0xc5, 0xe0, 0xfc, 0x6c, 0x2d, 0xc4, - 0x3a, 0x86, 0xbe, 0xb6, 0x2c, 0xf2, 0x55, 0x9f, 0xf1, 0x9d, 0xf1, 0x07, 0x21, 0x72, 0x67, 0xa0, - 0x4a, 0x23, 0xee, 0xa0, 0x66, 0xc2, 0x5e, 0x85, 0xf2, 0x9d, 0xae, 0xd3, 0x6b, 0x52, 0x23, 0x34, - 0x8d, 0xb8, 0x50, 0x7e, 0xad, 0xa2, 0xa5, 0x28, 0xa9, 0x64, 0x59, 0xb8, 0xf1, 0xeb, 0x86, 0x6a, - 0x81, 0x4f, 0x50, 0x2b, 0xcd, 0x57, 0xab, 0x68, 0xeb, 0x37, 0xba, 0x4e, 0xcf, 0xa3, 0x95, 0x0a, - 0x3e, 0x1c, 0x74, 0x3a, 0xb6, 0x81, 0x1e, 0x18, 0x5f, 0xc6, 0xa0, 0x28, 0xbc, 0xe5, 0x90, 0x66, - 0xf8, 0x0e, 0xb9, 0xef, 0x8a, 0x49, 0x09, 0xe6, 0xee, 0xff, 0x9b, 0x0b, 0xf2, 0x4b, 0x05, 0xf2, - 0x6c, 0x3c, 0xd4, 0x9a, 0xf1, 0x3d, 0x3a, 0x0a, 0x45, 0x22, 0xa3, 0x18, 0xd4, 0xbc, 0x30, 0x0d, - 0x74, 0x98, 0xbf, 0x3e, 0x50, 0xb5, 0xa4, 0x6d, 0xfb, 0x56, 0x05, 0x82, 0x02, 0xf9, 0x3f, 0xb3, - 0xa5, 0x52, 0xf0, 0x14, 0xb0, 0x8f, 0xdc, 0x8d, 0x46, 0x4b, 0x1d, 0xee, 0x1f, 0xb5, 0xb2, 0x1c, - 0x00, 0x94, 0xd2, 0xb3, 0xd4, 0x7b, 0x1e, 0x35, 0x02, 0x5f, 0xa1, 0x66, 0xc1, 0xe2, 0x1c, 0xaa, - 0x24, 0x1d, 0x62, 0x86, 0x27, 0x76, 0x78, 0x32, 0xe4, 0x3b, 0x6a, 0x2c, 0xc1, 0x0b, 0x72, 0xab, - 0x52, 0xe5, 0x19, 0x5b, 0xc1, 0xd1, 0xc3, 0x59, 0x89, 0x2f, 0xd1, 0xe1, 0xbe, 0xc5, 0x9c, 0xb3, - 0x04, 0xf4, 0x6f, 0xf0, 0xe8, 0xc1, 0x9e, 0x3e, 0xb1, 0x04, 0x30, 0x46, 0x8d, 0x1d, 0x4b, 0x62, - 0x7d, 0xd6, 0xa3, 0xfa, 0x79, 0x74, 0x8d, 0xda, 0x42, 0xad, 0xed, 0x16, 0x21, 0x29, 0x06, 0x23, - 0x3c, 0x91, 0xc0, 0x87, 0xd3, 0xc7, 0x7d, 0xdf, 0xd9, 0x60, 0xea, 0x7c, 0xd6, 0xea, 0x93, 0xe1, - 0x78, 0xd1, 0xd2, 0x19, 0x6f, 0xbf, 0x02, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x56, 0x40, 0x4d, 0x52, - 0x02, 0x00, 0x00, + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, + 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, + 0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46, + 0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7, + 0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22, + 0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11, + 0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18, + 0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8, + 0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c, + 0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69, + 0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 
0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef, + 0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3, + 0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87, + 0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44, + 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80, + 0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4, + 0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54, + 0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c, + 0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e, + 0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2, + 0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa, + 0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b, + 0x80, 0x51, 0x02, 0x00, 0x00, } diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto index 806760a1329..04856f913b1 100644 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -29,7 +29,7 @@ option java_multiple_files = true; option java_outer_classname = "OpenAPIExtensionV1"; // The Java package name must be proto package name with proper prefix. -option java_package = "org.openapic.v1"; +option java_package = "org.gnostic.v1"; // A reasonable prefix for the Objective-C symbols generated from the package. // It should at a minimum be 3 characters long, all uppercase, and convention @@ -53,7 +53,7 @@ message Version { message ExtensionHandlerRequest { // The OpenAPI descriptions that were explicitly listed on the command line. - // The specifications will appear in the order they are specified to openapic. + // The specifications will appear in the order they are specified to gnostic. Wrapper wrapper = 1; // The version number of openapi compiler. 
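
Reviewer note on the gnostic hunks above: the regenerated `ToRawInfo` methods switch every `yaml.MapItem` composite literal from positional to keyed fields, and `ReadInfoFromBytes` now only writes to `infoCache` when a non-empty filename is supplied, so anonymous byte blobs are no longer cached under the empty key. A minimal illustrative sketch of the keyed-literal pattern follows — it is not code from this diff, and it assumes the same `gopkg.in/yaml.v2` `MapSlice`/`MapItem` types the vendored compiler uses:

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2" // assumed import path for the MapSlice/MapItem types seen above
)

// buildInfo mirrors the generated ToRawInfo pattern: each non-empty field is
// appended as a keyed yaml.MapItem, so the literal stays valid (and passes
// `go vet`'s composite-literal check) even if MapItem's field order changes.
func buildInfo(name, description string) yaml.MapSlice {
	info := yaml.MapSlice{}
	if name != "" {
		info = append(info, yaml.MapItem{Key: "name", Value: name})
	}
	if description != "" {
		info = append(info, yaml.MapItem{Key: "description", Value: description})
	}
	return info
}

func main() {
	out, err := yaml.Marshal(buildInfo("petstore", "A sample API."))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // MapSlice preserves insertion order in the emitted YAML
}
```
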
diff --git a/vendor/github.com/gophercloud/gophercloud/.gitignore b/vendor/github.com/gophercloud/gophercloud/.gitignore index df9048a010a..dd91ed20559 100644 --- a/vendor/github.com/gophercloud/gophercloud/.gitignore +++ b/vendor/github.com/gophercloud/gophercloud/.gitignore @@ -1,2 +1,3 @@ **/*.swp .idea +.vscode diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml index 59c41949527..02728f49682 100644 --- a/vendor/github.com/gophercloud/gophercloud/.travis.yml +++ b/vendor/github.com/gophercloud/gophercloud/.travis.yml @@ -7,8 +7,8 @@ install: - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/goimports go: -- 1.8 -- tip +- "1.10" +- "tip" env: global: - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml index c259d03e184..3d4798fe6f8 100644 --- a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml @@ -1,3 +1,58 @@ +- job: + name: gophercloud-unittest + parent: golang-test + description: | + Run gophercloud unit test + run: .zuul/playbooks/gophercloud-unittest/run.yaml + nodeset: ubuntu-xenial-ut + +- job: + name: gophercloud-acceptance-test + parent: golang-test + description: | + Run gophercloud acceptance test on master branch + run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml + +- job: + name: gophercloud-acceptance-test-queens + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on queens branch + vars: + os_branch: 'stable/queens' + +- job: + name: gophercloud-acceptance-test-pike + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on pike branch + vars: + os_branch: 'stable/pike' +- job: + name: gophercloud-acceptance-test-ocata + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on ocata branch + vars: + os_branch: 'stable/ocata' + +- job: + name: gophercloud-acceptance-test-newton + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on newton branch + vars: + os_branch: 'stable/newton' + +- job: + name: gophercloud-acceptance-test-mitaka + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on mitaka branch + vars: + os_branch: 'stable/mitaka' + nodeset: ubuntu-trusty + - project: name: gophercloud/gophercloud check: @@ -7,6 +62,15 @@ recheck-mitaka: jobs: - gophercloud-acceptance-test-mitaka + recheck-newton: + jobs: + - gophercloud-acceptance-test-newton + recheck-ocata: + jobs: + - gophercloud-acceptance-test-ocata recheck-pike: jobs: - gophercloud-acceptance-test-pike + recheck-queens: + jobs: + - gophercloud-acceptance-test-queens diff --git a/vendor/github.com/gophercloud/gophercloud/FAQ.md 
b/vendor/github.com/gophercloud/gophercloud/FAQ.md deleted file mode 100644 index 88a366a288b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/FAQ.md +++ /dev/null @@ -1,148 +0,0 @@ -# Tips - -## Implementing default logging and re-authentication attempts - -You can implement custom logging and/or limit re-auth attempts by creating a custom HTTP client -like the following and setting it as the provider client's HTTP Client (via the -`gophercloud.ProviderClient.HTTPClient` field): - -```go -//... - -// LogRoundTripper satisfies the http.RoundTripper interface and is used to -// customize the default Gophercloud RoundTripper to allow for logging. -type LogRoundTripper struct { - rt http.RoundTripper - numReauthAttempts int -} - -// newHTTPClient return a custom HTTP client that allows for logging relevant -// information before and after the HTTP request. -func newHTTPClient() http.Client { - return http.Client{ - Transport: &LogRoundTripper{ - rt: http.DefaultTransport, - }, - } -} - -// RoundTrip performs a round-trip HTTP request and logs relevant information about it. -func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { - glog.Infof("Request URL: %s\n", request.URL) - - response, err := lrt.rt.RoundTrip(request) - if response == nil { - return nil, err - } - - if response.StatusCode == http.StatusUnauthorized { - if lrt.numReauthAttempts == 3 { - return response, fmt.Errorf("Tried to re-authenticate 3 times with no success.") - } - lrt.numReauthAttempts++ - } - - glog.Debugf("Response Status: %s\n", response.Status) - - return response, nil -} - -endpoint := "https://127.0.0.1/auth" -pc := openstack.NewClient(endpoint) -pc.HTTPClient = newHTTPClient() - -//... -``` - - -## Implementing custom objects - -OpenStack request/response objects may differ among variable names or types. - -### Custom request objects - -To pass custom options to a request, implement the desired `OptsBuilder` interface. For -example, to pass in - -```go -type MyCreateServerOpts struct { - Name string - Size int -} -``` - -to `servers.Create`, simply implement the `servers.CreateOptsBuilder` interface: - -```go -func (o MyCreateServeropts) ToServerCreateMap() (map[string]interface{}, error) { - return map[string]interface{}{ - "name": o.Name, - "size": o.Size, - }, nil -} -``` - -create an instance of your custom options object, and pass it to `servers.Create`: - -```go -// ... -myOpts := MyCreateServerOpts{ - Name: "s1", - Size: "100", -} -server, err := servers.Create(computeClient, myOpts).Extract() -// ... -``` - -### Custom response objects - -Some OpenStack services have extensions. Extensions that are supported in Gophercloud can be -combined to create a custom object: - -```go -// ... -type MyVolume struct { - volumes.Volume - tenantattr.VolumeExt -} - -var v struct { - MyVolume `json:"volume"` -} - -err := volumes.Get(client, volID).ExtractInto(&v) -// ... -``` - -## Overriding default `UnmarshalJSON` method - -For some response objects, a field may be a custom type or may be allowed to take on -different types. In these cases, overriding the default `UnmarshalJSON` method may be -necessary. To do this, declare the JSON `struct` field tag as "-" and create an `UnmarshalJSON` -method on the type: - -```go -// ... 
-type MyVolume struct { - ID string `json: "id"` - TimeCreated time.Time `json: "-"` -} - -func (r *MyVolume) UnmarshalJSON(b []byte) error { - type tmp MyVolume - var s struct { - tmp - TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = Volume(s.tmp) - - r.TimeCreated = time.Time(s.CreatedAt) - - return err -} -// ... -``` diff --git a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md b/vendor/github.com/gophercloud/gophercloud/MIGRATING.md deleted file mode 100644 index aa383c9cc9e..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/MIGRATING.md +++ /dev/null @@ -1,32 +0,0 @@ -# Compute - -## Floating IPs - -* `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingip` is now `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips` -* `floatingips.Associate` and `floatingips.Disassociate` have been removed. -* `floatingips.DisassociateOpts` is now required to disassociate a Floating IP. - -## Security Groups - -* `secgroups.AddServerToGroup` is now `secgroups.AddServer`. -* `secgroups.RemoveServerFromGroup` is now `secgroups.RemoveServer`. - -## Servers - -* `servers.Reboot` now requires a `servers.RebootOpts` struct: - - ```golang - rebootOpts := &servers.RebootOpts{ - Type: servers.SoftReboot, - } - res := servers.Reboot(client, server.ID, rebootOpts) - ``` - -# Identity - -## V3 - -### Tokens - -* `Token.ExpiresAt` is now of type `gophercloud.JSONRFC3339Milli` instead of - `time.Time` diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md index bb218c3fe9e..8c5bfce796b 100644 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -127,7 +127,7 @@ new resource in the `server` variable (a ## Advanced Usage -Have a look at the [FAQ](./FAQ.md) for some tips on customizing the way Gophercloud works. +Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works. ## Backwards-Compatibility Guarantees @@ -148,12 +148,12 @@ We'd like to extend special thanks and appreciation to the following: ### OpenLab - + OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. ### VEXXHOST - + VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md b/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md deleted file mode 100644 index 22a29009412..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/STYLEGUIDE.md +++ /dev/null @@ -1,79 +0,0 @@ - -## On Pull Requests - -- Please make sure to read our [contributing guide](/.github/CONTRIBUTING.md). - -- Before you start a PR there needs to be a Github issue and a discussion about it - on that issue with a core contributor, even if it's just a 'SGTM'. - -- A PR's description must reference the issue it closes with a `For ` (e.g. For #293). - -- A PR's description must contain link(s) to the line(s) in the OpenStack - source code (on Github) that prove(s) the PR code to be valid. Links to documentation - are not good enough. The link(s) should be to a non-`master` branch. 
For example, - a pull request implementing the creation of a Neutron v2 subnet might put the - following link in the description: - - https://github.com/openstack/neutron/blob/stable/mitaka/neutron/api/v2/attributes.py#L749 - - From that link, a reviewer (or user) can verify the fields in the request/response - objects in the PR. - -- A PR that is in-progress should have `[wip]` in front of the PR's title. When - ready for review, remove the `[wip]` and ping a core contributor with an `@`. - -- Forcing PRs to be small can have the effect of users submitting PRs in a hierarchical chain, with - one depending on the next. If a PR depends on another one, it should have a [Pending #PRNUM] - prefix in the PR title. In addition, it will be the PR submitter's responsibility to remove the - [Pending #PRNUM] tag once the PR has been updated with the merged, dependent PR. That will - let reviewers know it is ready to review. - -- A PR should be small. Even if you intend on implementing an entire - service, a PR should only be one route of that service - (e.g. create server or get server, but not both). - -- Unless explicitly asked, do not squash commits in the middle of a review; only - append. It makes it difficult for the reviewer to see what's changed from one - review to the next. - -- See [#583](https://github.com/gophercloud/gophercloud/issues/583) as an example of a - well-formatted issue which contains all relevant information we need to review and approve. - -## On Code - -- In re design: follow as closely as is reasonable the code already in the library. - Most operations (e.g. create, delete) admit the same design. - -- Unit tests and acceptance (integration) tests must be written to cover each PR. - Tests for operations with several options (e.g. list, create) should include all - the options in the tests. This will allow users to verify an operation on their - own infrastructure and see an example of usage. - -- If in doubt, ask in-line on the PR. - -### File Structure - -- The following should be used in most cases: - - - `requests.go`: contains all the functions that make HTTP requests and the - types associated with the HTTP request (parameters for URL, body, etc) - - `results.go`: contains all the response objects and their methods - - `urls.go`: contains the endpoints to which the requests are made - -### Naming - -- For methods on a type in `results.go`, the receiver should be named `r` and the - variable into which it will be unmarshalled `s`. - -- Functions in `requests.go`, with the exception of functions that return a - `pagination.Pager`, should be named returns of the name `r`. - -- Functions in `requests.go` that accept request bodies should accept as their - last parameter an `interface` named `OptsBuilder` (eg `CreateOptsBuilder`). - This `interface` should have at the least a method named `ToMap` - (eg `ToPortCreateMap`). - -- Functions in `requests.go` that accept query strings should accept as their - last parameter an `interface` named `OptsBuilder` (eg `ListOptsBuilder`). - This `interface` should have at the least a method named `ToQuery` - (eg `ToServerListQuery`). 
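
Side note on the FAQ deleted above (the README hunk now links to `./docs/FAQ.md` instead): its `UnmarshalJSON` override would not compile as written, since it converts to `Volume` rather than `MyVolume` and reads `s.CreatedAt` where the shadowing field is `TimeCreated`. A self-contained sketch of the same override pattern, with a hypothetical `rfc3339Time` wrapper standing in for `gophercloud.JSONRFC3339MilliNoZ`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// rfc3339Time is a hypothetical stand-in for gophercloud.JSONRFC3339MilliNoZ,
// used here only to keep the sketch self-contained.
type rfc3339Time time.Time

func (t *rfc3339Time) UnmarshalJSON(b []byte) error {
	var s string
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	parsed, err := time.Parse("2006-01-02T15:04:05.999999", s)
	if err != nil {
		return err
	}
	*t = rfc3339Time(parsed)
	return nil
}

// MyVolume hides TimeCreated from the default decoder and fills it manually.
type MyVolume struct {
	ID          string    `json:"id"`
	TimeCreated time.Time `json:"-"`
}

func (r *MyVolume) UnmarshalJSON(b []byte) error {
	type tmp MyVolume // alias without UnmarshalJSON, so decoding does not recurse
	var s struct {
		tmp
		TimeCreated rfc3339Time `json:"created_at"` // shadows tmp.TimeCreated for decoding
	}
	if err := json.Unmarshal(b, &s); err != nil {
		return err
	}
	*r = MyVolume(s.tmp)
	r.TimeCreated = time.Time(s.TimeCreated)
	return nil
}

func main() {
	var v MyVolume
	if err := json.Unmarshal([]byte(`{"id":"vol-1","created_at":"2018-05-01T12:30:45.123456"}`), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.ID, v.TimeCreated)
}
```

The embedded `tmp` alias keeps the standard decoder from recursing into the custom method, while the shadowing `created_at` field captures the timestamp in the wrapper type before it is copied back as a plain `time.Time`.
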
diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go index 4211470020a..5e693585c2d 100644 --- a/vendor/github.com/gophercloud/gophercloud/auth_options.go +++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go @@ -81,6 +81,17 @@ type AuthOptions struct { // TokenID allows users to authenticate (possibly as another user) with an // authentication token ID. TokenID string `json:"-"` + + // Scope determines the scoping of the authentication request. + Scope *AuthScope `json:"-"` +} + +// AuthScope allows a created token to be limited to a specific domain or project. +type AuthScope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string } // ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder @@ -263,85 +274,83 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s } func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - - var scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string - } - - if opts.TenantID != "" { - scope.ProjectID = opts.TenantID - } else { - if opts.TenantName != "" { - scope.ProjectName = opts.TenantName - scope.DomainID = opts.DomainID - scope.DomainName = opts.DomainName + // For backwards compatibility. + // If AuthOptions.Scope was not set, try to determine it. + // This works well for common scenarios. + if opts.Scope == nil { + opts.Scope = new(AuthScope) + if opts.TenantID != "" { + opts.Scope.ProjectID = opts.TenantID + } else { + if opts.TenantName != "" { + opts.Scope.ProjectName = opts.TenantName + opts.Scope.DomainID = opts.DomainID + opts.Scope.DomainName = opts.DomainName + } } } - if scope.ProjectName != "" { + if opts.Scope.ProjectName != "" { // ProjectName provided: either DomainID or DomainName must also be supplied. // ProjectID may not be supplied. - if scope.DomainID == "" && scope.DomainName == "" { + if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { return nil, ErrScopeDomainIDOrDomainName{} } - if scope.ProjectID != "" { + if opts.Scope.ProjectID != "" { return nil, ErrScopeProjectIDOrProjectName{} } - if scope.DomainID != "" { + if opts.Scope.DomainID != "" { // ProjectName + DomainID return map[string]interface{}{ "project": map[string]interface{}{ - "name": &scope.ProjectName, - "domain": map[string]interface{}{"id": &scope.DomainID}, + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, }, }, nil } - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { // ProjectName + DomainName return map[string]interface{}{ "project": map[string]interface{}{ - "name": &scope.ProjectName, - "domain": map[string]interface{}{"name": &scope.DomainName}, + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, }, }, nil } - } else if scope.ProjectID != "" { + } else if opts.Scope.ProjectID != "" { // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if scope.DomainID != "" { + if opts.Scope.DomainID != "" { return nil, ErrScopeProjectIDAlone{} } - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { return nil, ErrScopeProjectIDAlone{} } // ProjectID return map[string]interface{}{ "project": map[string]interface{}{ - "id": &scope.ProjectID, + "id": &opts.Scope.ProjectID, }, }, nil - } else if scope.DomainID != "" { + } else if opts.Scope.DomainID != "" { // DomainID provided. 
ProjectID, ProjectName, and DomainName may not be provided. - if scope.DomainName != "" { + if opts.Scope.DomainName != "" { return nil, ErrScopeDomainIDOrDomainName{} } // DomainID return map[string]interface{}{ "domain": map[string]interface{}{ - "id": &scope.DomainID, + "id": &opts.Scope.DomainID, }, }, nil - } else if scope.DomainName != "" { + } else if opts.Scope.DomainName != "" { // DomainName return map[string]interface{}{ "domain": map[string]interface{}{ - "name": &scope.DomainName, + "name": &opts.Scope.DomainName, }, }, nil } diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go index 2466932efe4..a5fa68d6d55 100644 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -1,6 +1,9 @@ package gophercloud -import "fmt" +import ( + "fmt" + "strings" +) // BaseError is an error type that all other error types embed. type BaseError struct { @@ -43,6 +46,33 @@ func (e ErrInvalidInput) Error() string { return e.choseErrString() } +// ErrMissingEnvironmentVariable is the error when environment variable is required +// in a particular situation but not provided by the user +type ErrMissingEnvironmentVariable struct { + BaseError + EnvironmentVariable string +} + +func (e ErrMissingEnvironmentVariable) Error() string { + e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable) + return e.choseErrString() +} + +// ErrMissingAnyoneOfEnvironmentVariables is the error when anyone of the environment variables +// is required in a particular situation but not provided by the user +type ErrMissingAnyoneOfEnvironmentVariables struct { + BaseError + EnvironmentVariables []string +} + +func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Missing one of the following environment variables [%s]", + strings.Join(e.EnvironmentVariables, ", "), + ) + return e.choseErrString() +} + // ErrUnexpectedResponseCode is returned by the Request method when a response code other than // those listed in OkCodes is encountered. type ErrUnexpectedResponseCode struct { @@ -108,7 +138,11 @@ type ErrDefault503 struct { } func (e ErrDefault400) Error() string { - return "Invalid request due to incorrect syntax or missing required parameters." 
+ e.DefaultErrString = fmt.Sprintf( + "Bad request with: [%s %s], error message: %s", + e.Method, e.URL, e.Body, + ) + return e.choseErrString() } func (e ErrDefault401) Error() string { return "Authentication failed" diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go index b5482ba8c9f..994b5550c91 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go @@ -50,17 +50,23 @@ func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { } if authURL == "" { - err := gophercloud.ErrMissingInput{Argument: "authURL"} + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_AUTH_URL", + } return nilOptions, err } if username == "" && userID == "" { - err := gophercloud.ErrMissingInput{Argument: "username"} + err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERNAME", "OS_USERID"}, + } return nilOptions, err } if password == "" { - err := gophercloud.ErrMissingInput{Argument: "password"} + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PASSWORD", + } return nilOptions, err } diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go index 5a52e579148..e554b7bc370 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -2,10 +2,7 @@ package openstack import ( "fmt" - "net/url" "reflect" - "regexp" - "strings" "github.com/gophercloud/gophercloud" tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" @@ -38,21 +35,11 @@ A basic example of using this would be: client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) */ func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { - u, err := url.Parse(endpoint) + base, err := utils.BaseEndpoint(endpoint) if err != nil { return nil, err } - u.RawQuery, u.Fragment = "", "" - - var base string - versionRe := regexp.MustCompile("v[0-9.]+/?") - if version := versionRe.FindString(u.Path); version != "" { - base = strings.Replace(u.String(), version, "", -1) - } else { - base = u.String() - } - endpoint = gophercloud.NormalizeURL(endpoint) base = gophercloud.NormalizeURL(base) @@ -287,11 +274,17 @@ func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOp // Ensure endpoint still has a suffix of v3. // This is because EndpointLocator might have found a versionless - // endpoint and requests will fail unless targeted at /v3. - if !strings.HasSuffix(endpoint, "v3/") { - endpoint = endpoint + "v3/" + // endpoint or the published endpoint is still /v2.0. In both + // cases, we need to fix the endpoint to point to /v3. + base, err := utils.BaseEndpoint(endpoint) + if err != nil { + return nil, err } + base = gophercloud.NormalizeURL(base) + + endpoint = base + "v3/" + return &gophercloud.ServiceClient{ ProviderClient: client, Endpoint: endpoint, @@ -394,3 +387,22 @@ func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.Endpoi sc.ResourceBase = sc.Endpoint + "v2.0/" return sc, err } + +// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering +// package. 
+func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "clustering") +} + +// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging +// service. +func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "messaging") + sc.MoreHeaders = map[string]string{"Client-ID": clientID} + return sc, err +} + +// NewContainerV1 creates a ServiceClient that may be used with v1 container package +func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go index ca35851e4a4..6e99a793c53 100644 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go @@ -72,72 +72,15 @@ func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[s // ToTokenV3CreateMap builds a scope request body from AuthOptions. func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - if opts.Scope.ProjectName != "" { - // ProjectName provided: either DomainID or DomainName must also be supplied. - // ProjectID may not be supplied. - if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { - return nil, gophercloud.ErrScopeDomainIDOrDomainName{} - } - if opts.Scope.ProjectID != "" { - return nil, gophercloud.ErrScopeProjectIDOrProjectName{} - } - - if opts.Scope.DomainID != "" { - // ProjectName + DomainID - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, - }, - }, nil - } - - if opts.Scope.DomainName != "" { - // ProjectName + DomainName - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, - }, - }, nil - } - } else if opts.Scope.ProjectID != "" { - // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if opts.Scope.DomainID != "" { - return nil, gophercloud.ErrScopeProjectIDAlone{} - } - if opts.Scope.DomainName != "" { - return nil, gophercloud.ErrScopeProjectIDAlone{} - } - - // ProjectID - return map[string]interface{}{ - "project": map[string]interface{}{ - "id": &opts.Scope.ProjectID, - }, - }, nil - } else if opts.Scope.DomainID != "" { - // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
- if opts.Scope.DomainName != "" { - return nil, gophercloud.ErrScopeDomainIDOrDomainName{} - } - - // DomainID - return map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &opts.Scope.DomainID, - }, - }, nil - } else if opts.Scope.DomainName != "" { - // DomainName - return map[string]interface{}{ - "domain": map[string]interface{}{ - "name": &opts.Scope.DomainName, - }, - }, nil + scope := gophercloud.AuthScope(opts.Scope) + + gophercloudAuthOpts := gophercloud.AuthOptions{ + Scope: &scope, + DomainID: opts.DomainID, + DomainName: opts.DomainName, } - return nil, nil + return gophercloudAuthOpts.ToTokenV3ScopeMap() } func (opts *AuthOptions) CanReauth() bool { @@ -190,7 +133,7 @@ func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { // Validate determines if a specified token is valid or not. func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { - resp, err := c.Request("HEAD", tokenURL(c), &gophercloud.RequestOpts{ + resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{ MoreHeaders: subjectTokenHeaders(c, token), OkCodes: []int{200, 204, 404}, }) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go new file mode 100644 index 00000000000..d6f9e34eaa1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go @@ -0,0 +1,29 @@ +package utils + +import ( + "net/url" + "regexp" + "strings" +) + +// BaseEndpoint will return a URL without the /vX.Y +// portion of the URL. +func BaseEndpoint(endpoint string) (string, error) { + var base string + + u, err := url.Parse(endpoint) + if err != nil { + return base, err + } + + u.RawQuery, u.Fragment = "", "" + + versionRe := regexp.MustCompile("v[0-9.]+/?") + if version := versionRe.FindString(u.Path); version != "" { + base = strings.Replace(u.String(), version, "", -1) + } else { + base = u.String() + } + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go index 28ad9068565..19b8cf7bf84 100644 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ b/vendor/github.com/gophercloud/gophercloud/params.go @@ -363,9 +363,8 @@ func BuildQueryString(opts interface{}) (*url.URL, error) { } } } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) } } @@ -439,10 +438,9 @@ func BuildHeaders(opts interface{}) (map[string]string, error) { optsMap[tags[0]] = strconv.FormatBool(v.Bool()) } } else { - // Otherwise, the field is not set. - if len(tags) == 2 && tags[1] == "required" { - // And the field is required. Return an error. 
- return optsMap, fmt.Errorf("Required header not set.") + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) } } } diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go index e93c236e179..17e45127435 100644 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -126,6 +126,36 @@ func (client *ProviderClient) SetToken(t string) { client.TokenID = t } +//Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is +//called because of a 401 response, the caller may pass the previous token. In +//this case, the reauthentication can be skipped if another thread has already +//reauthenticated in the meantime. If no previous token is known, an empty +//string should be passed instead to force unconditional reauthentication. +func (client *ProviderClient) Reauthenticate(previousToken string) (err error) { + if client.ReauthFunc == nil { + return nil + } + + if client.mut == nil { + return client.ReauthFunc() + } + client.mut.Lock() + defer client.mut.Unlock() + + client.reauthmut.Lock() + client.reauthmut.reauthing = true + client.reauthmut.Unlock() + + if previousToken == "" || client.TokenID == previousToken { + err = client.ReauthFunc() + } + + client.reauthmut.Lock() + client.reauthmut.reauthing = false + client.reauthmut.Unlock() + return +} + // RequestOpts customizes the behavior of the provider.Request() method. type RequestOpts struct { // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The @@ -254,21 +284,7 @@ func (client *ProviderClient) Request(method, url string, options *RequestOpts) } case http.StatusUnauthorized: if client.ReauthFunc != nil { - if client.mut != nil { - client.mut.Lock() - client.reauthmut.Lock() - client.reauthmut.reauthing = true - client.reauthmut.Unlock() - if curtok := client.TokenID; curtok == prereqtok { - err = client.ReauthFunc() - } - client.reauthmut.Lock() - client.reauthmut.reauthing = false - client.reauthmut.Unlock() - client.mut.Unlock() - } else { - err = client.ReauthFunc() - } + err = client.Reauthenticate(prereqtok) if err != nil { e := &ErrUnableToReauthenticate{} e.ErrOriginal = respErr diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go index e64feee19ed..fdd4830ec1f 100644 --- a/vendor/github.com/gophercloud/gophercloud/results.go +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -345,6 +345,27 @@ func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { return nil } +// RFC3339ZNoT is the time format used in Zun (Containers Service). +const RFC3339ZNoT = "2006-01-02 15:04:05-07:00" + +type JSONRFC3339ZNoT time.Time + +func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoT, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoT(t) + return nil +} + /* Link is an internal type to be used in packages of collection resources that are paginated in a certain way. 
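The new `Reauthenticate` helper added to provider_client.go above factors the thread-safe 401 handling out of `Request`. As a hedged usage sketch: the `retryOnAuthFailure` wrapper below is illustrative only and not part of gophercloud; only `ProviderClient.TokenID`, `ProviderClient.Reauthenticate`, and `gophercloud.ErrDefault401` come from the library.

```go
package example

import "github.com/gophercloud/gophercloud"

// retryOnAuthFailure is a hypothetical wrapper: if call fails with a 401,
// re-authenticate via the new Reauthenticate helper and retry once.
// Passing the token the failed request used lets Reauthenticate skip the
// extra round-trip when another goroutine has already refreshed the token.
func retryOnAuthFailure(provider *gophercloud.ProviderClient, call func() error) error {
	previousToken := provider.TokenID
	err := call()
	if _, ok := err.(gophercloud.ErrDefault401); ok {
		if rerr := provider.Reauthenticate(previousToken); rerr != nil {
			return rerr
		}
		return call()
	}
	return err
}
```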
diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go index d1a48fea35b..2734510e1b4 100644 --- a/vendor/github.com/gophercloud/gophercloud/service_client.go +++ b/vendor/github.com/gophercloud/gophercloud/service_client.go @@ -28,6 +28,10 @@ type ServiceClient struct { // The microversion of the service to use. Set this to use a particular microversion. Microversion string + + // MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way, + // values set in this field will be set on all the HTTP requests the service client sends. + MoreHeaders map[string]string } // ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. @@ -108,6 +112,15 @@ func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Respon return client.Request("DELETE", url, opts) } +// Head calls `Request` with the "HEAD" HTTP verb. +func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("HEAD", url, opts) +} + func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { switch client.Type { case "compute": @@ -122,3 +135,16 @@ func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion } } + +// Request carries out the HTTP operation for the service client +func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { + if len(client.MoreHeaders) > 0 { + if options == nil { + options = new(RequestOpts) + } + for k, v := range client.MoreHeaders { + options.MoreHeaders[k] = v + } + } + return client.ProviderClient.Request(method, url, options) +} diff --git a/vendor/github.com/gorilla/context/.travis.yml b/vendor/github.com/gorilla/context/.travis.yml index faca4dad3dc..6f440f1e42d 100644 --- a/vendor/github.com/gorilla/context/.travis.yml +++ b/vendor/github.com/gorilla/context/.travis.yml @@ -7,13 +7,13 @@ matrix: - go: 1.4 - go: 1.5 - go: 1.6 + - go: 1.7 + - go: tip + allow_failures: - go: tip - -install: - - go get golang.org/x/tools/cmd/vet script: - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d .) - - go tool vet . + - go vet $(go list ./... | grep -v /vendor/) - go test -v -race ./... diff --git a/vendor/github.com/gorilla/context/README.md b/vendor/github.com/gorilla/context/README.md index c60a31b053b..08f86693bcd 100644 --- a/vendor/github.com/gorilla/context/README.md +++ b/vendor/github.com/gorilla/context/README.md @@ -4,4 +4,7 @@ context gorilla/context is a general purpose registry for global request variables. +> Note: gorilla/context, having been born well before `context.Context` existed, does not play well +> with the shallow copying of the request that [`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) (added to net/http Go 1.7 onwards) performs. You should either use *just* gorilla/context, or moving forward, the new `http.Request.Context()`. 
+ Read the full documentation here: http://www.gorillatoolkit.org/pkg/context diff --git a/vendor/github.com/gorilla/context/doc.go b/vendor/github.com/gorilla/context/doc.go index 73c7400311e..448d1bfcac6 100644 --- a/vendor/github.com/gorilla/context/doc.go +++ b/vendor/github.com/gorilla/context/doc.go @@ -5,6 +5,12 @@ /* Package context stores values shared during a request lifetime. +Note: gorilla/context, having been born well before `context.Context` existed, +does not play well > with the shallow copying of the request that +[`http.Request.WithContext`](https://golang.org/pkg/net/http/#Request.WithContext) +(added to net/http Go 1.7 onwards) performs. You should either use *just* +gorilla/context, or moving forward, the new `http.Request.Context()`. + For example, a router can set variables extracted from the URL and later application handlers can access those values, or it can be used to store sessions values to be saved at the end of a request. There are several diff --git a/vendor/github.com/gorilla/mux/.travis.yml b/vendor/github.com/gorilla/mux/.travis.yml index 3302233f3c3..ad0935dbd37 100644 --- a/vendor/github.com/gorilla/mux/.travis.yml +++ b/vendor/github.com/gorilla/mux/.travis.yml @@ -3,11 +3,12 @@ sudo: false matrix: include: - - go: 1.5 - - go: 1.6 - - go: 1.7 - - go: 1.8 - - go: 1.9 + - go: 1.5.x + - go: 1.6.x + - go: 1.7.x + - go: 1.8.x + - go: 1.9.x + - go: 1.10.x - go: tip allow_failures: - go: tip diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index f9b3103f068..e424397ac4d 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,5 +1,5 @@ -gorilla/mux -=== +# gorilla/mux + [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) [![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) [![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) @@ -29,6 +29,7 @@ The name mux stands for "HTTP request multiplexer". Like the standard `http.Serv * [Walking Routes](#walking-routes) * [Graceful Shutdown](#graceful-shutdown) * [Middleware](#middleware) +* [Testing Handlers](#testing-handlers) * [Full Example](#full-example) --- @@ -178,70 +179,13 @@ s.HandleFunc("/{key}/", ProductHandler) // "/products/{key}/details" s.HandleFunc("/{key}/details", ProductDetailsHandler) ``` -### Listing Routes - -Routes on a mux can be listed using the Router.Walk method—useful for generating documentation: - -```go -package main - -import ( - "fmt" - "net/http" - "strings" - - "github.com/gorilla/mux" -) - -func handler(w http.ResponseWriter, r *http.Request) { - return -} -func main() { - r := mux.NewRouter() - r.HandleFunc("/", handler) - r.HandleFunc("/products", handler).Methods("POST") - r.HandleFunc("/articles", handler).Methods("GET") - r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") - r.HandleFunc("/authors", handler).Queries("surname", "{surname}") - r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - t, err := route.GetPathTemplate() - if err != nil { - return err - } - qt, err := route.GetQueriesTemplates() - if err != nil { - return err - } - // p will contain regular expression is compatible with regular expression in Perl, Python, and other languages. 
- // for instance the regular expression for path '/articles/{id}' will be '^/articles/(?P[^/]+)$' - p, err := route.GetPathRegexp() - if err != nil { - return err - } - // qr will contain a list of regular expressions with the same semantics as GetPathRegexp, - // just applied to the Queries pairs instead, e.g., 'Queries("surname", "{surname}") will return - // {"^surname=(?P.*)$}. Where each combined query pair will have an entry in the list. - qr, err := route.GetQueriesRegexp() - if err != nil { - return err - } - m, err := route.GetMethods() - if err != nil { - return err - } - fmt.Println(strings.Join(m, ","), strings.Join(qt, ","), strings.Join(qr, ","), t, p) - return nil - }) - http.Handle("/", r) -} -``` ### Static Files Note that the path provided to `PathPrefix()` represents a "wildcard": calling `PathPrefix("/static/").Handler(...)` means that the handler will be passed any -request that matches "/static/*". This makes it easy to serve static files with mux: +request that matches "/static/\*". This makes it easy to serve static files with mux: ```go func main() { @@ -348,41 +292,58 @@ The `Walk` function on `mux.Router` can be used to visit all of the routes that the following prints all of the registered routes: ```go -r := mux.NewRouter() -r.HandleFunc("/", handler) -r.HandleFunc("/products", handler).Methods("POST") -r.HandleFunc("/articles", handler).Methods("GET") -r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") -r.HandleFunc("/authors", handler).Queries("surname", "{surname}") -r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { - t, err := route.GetPathTemplate() - if err != nil { - return err - } - qt, err := route.GetQueriesTemplates() - if err != nil { - return err - } - // p will contain a regular expression that is compatible with regular expressions in Perl, Python, and other languages. - // For example, the regular expression for path '/articles/{id}' will be '^/articles/(?P[^/]+)$'. - p, err := route.GetPathRegexp() - if err != nil { - return err - } - // qr will contain a list of regular expressions with the same semantics as GetPathRegexp, - // just applied to the Queries pairs instead, e.g., 'Queries("surname", "{surname}") will return - // {"^surname=(?P.*)$}. Where each combined query pair will have an entry in the list. 
- qr, err := route.GetQueriesRegexp() - if err != nil { - return err - } - m, err := route.GetMethods() - if err != nil { - return err - } - fmt.Println(strings.Join(m, ","), strings.Join(qt, ","), strings.Join(qr, ","), t, p) - return nil -}) +package main + +import ( + "fmt" + "net/http" + "strings" + + "github.com/gorilla/mux" +) + +func handler(w http.ResponseWriter, r *http.Request) { + return +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.HandleFunc("/products", handler).Methods("POST") + r.HandleFunc("/articles", handler).Methods("GET") + r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT") + r.HandleFunc("/authors", handler).Queries("surname", "{surname}") + err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error { + pathTemplate, err := route.GetPathTemplate() + if err == nil { + fmt.Println("ROUTE:", pathTemplate) + } + pathRegexp, err := route.GetPathRegexp() + if err == nil { + fmt.Println("Path regexp:", pathRegexp) + } + queriesTemplates, err := route.GetQueriesTemplates() + if err == nil { + fmt.Println("Queries templates:", strings.Join(queriesTemplates, ",")) + } + queriesRegexps, err := route.GetQueriesRegexp() + if err == nil { + fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ",")) + } + methods, err := route.GetMethods() + if err == nil { + fmt.Println("Methods:", strings.Join(methods, ",")) + } + fmt.Println() + return nil + }) + + if err != nil { + fmt.Println(err) + } + + http.Handle("/", r) +} ``` ### Graceful Shutdown @@ -399,6 +360,7 @@ import ( "net/http" "os" "os/signal" + "time" "github.com/gorilla/mux" ) @@ -410,7 +372,7 @@ func main() { r := mux.NewRouter() // Add your routes as needed - + srv := &http.Server{ Addr: "0.0.0.0:8080", // Good practice to set timeouts to avoid Slowloris attacks. @@ -426,7 +388,7 @@ func main() { log.Println(err) } }() - + c := make(chan os.Signal, 1) // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C) // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught. @@ -436,7 +398,8 @@ func main() { <-c // Create a deadline to wait for. - ctx, cancel := context.WithTimeout(ctx, wait) + ctx, cancel := context.WithTimeout(context.Background(), wait) + defer cancel() // Doesn't block if no connections, but will otherwise wait // until the timeout deadline. srv.Shutdown(ctx) @@ -464,7 +427,7 @@ Typically, the returned handler is a closure which does something with the http. 
A very basic middleware which logs the URI of the request being handled could be written as: ```go -func simpleMw(next http.Handler) http.Handler { +func loggingMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // Do stuff here log.Println(r.RequestURI) @@ -474,12 +437,12 @@ func simpleMw(next http.Handler) http.Handler { } ``` -Middlewares can be added to a router using `Router.AddMiddlewareFunc()`: +Middlewares can be added to a router using `Router.Use()`: ```go r := mux.NewRouter() r.HandleFunc("/", handler) -r.AddMiddleware(simpleMw) +r.Use(loggingMiddleware) ``` A more complex authentication middleware, which maps session token to users, could be written as: @@ -502,7 +465,7 @@ func (amw *authenticationMiddleware) Populate() { func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { token := r.Header.Get("X-Session-Token") - + if user, found := amw.tokenUsers[token]; found { // We found the token in our map log.Printf("Authenticated user %s\n", user) @@ -510,7 +473,7 @@ func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler next.ServeHTTP(w, r) } else { // Write an error and stop the handler chain - http.Error(w, "Forbidden", 403) + http.Error(w, "Forbidden", http.StatusForbidden) } }) } @@ -523,10 +486,136 @@ r.HandleFunc("/", handler) amw := authenticationMiddleware{} amw.Populate() -r.AddMiddlewareFunc(amw.Middleware) +r.Use(amw.Middleware) +``` + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it. + +### Testing Handlers + +Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_. + +First, our simple HTTP handler: + +```go +// endpoints.go +package main + +func HealthCheckHandler(w http.ResponseWriter, r *http.Request) { + // A very simple health check. + w.WriteHeader(http.StatusOK) + w.Header().Set("Content-Type", "application/json") + + // In the future we could report back on the status of our DB, or our cache + // (e.g. Redis) by performing a simple PING, and include them in the response. + io.WriteString(w, `{"alive": true}`) +} + +func main() { + r := mux.NewRouter() + r.HandleFunc("/health", HealthCheckHandler) + + log.Fatal(http.ListenAndServe("localhost:8080", r)) +} +``` + +Our test code: + +```go +// endpoints_test.go +package main + +import ( + "net/http" + "net/http/httptest" + "testing" +) + +func TestHealthCheckHandler(t *testing.T) { + // Create a request to pass to our handler. We don't have any query parameters for now, so we'll + // pass 'nil' as the third parameter. + req, err := http.NewRequest("GET", "/health", nil) + if err != nil { + t.Fatal(err) + } + + // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. + rr := httptest.NewRecorder() + handler := http.HandlerFunc(HealthCheckHandler) + + // Our handlers satisfy http.Handler, so we can call their ServeHTTP method + // directly and pass in our Request and ResponseRecorder. 
+ handler.ServeHTTP(rr, req) + + // Check the status code is what we expect. + if status := rr.Code; status != http.StatusOK { + t.Errorf("handler returned wrong status code: got %v want %v", + status, http.StatusOK) + } + + // Check the response body is what we expect. + expected := `{"alive": true}` + if rr.Body.String() != expected { + t.Errorf("handler returned unexpected body: got %v want %v", + rr.Body.String(), expected) + } +} +``` + +In the case that our routes have [variables](#examples), we can pass those in the request. We could write +[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple +possible route variables as needed. + +```go +// endpoints.go +func main() { + r := mux.NewRouter() + // A route with a route variable: + r.HandleFunc("/metrics/{type}", MetricsHandler) + + log.Fatal(http.ListenAndServe("localhost:8080", r)) +} ``` -Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares *should* write to `ResponseWriter` if they *are* going to terminate the request, and they *should not* write to `ResponseWriter` if they *are not* going to terminate it. +Our test file, with a table-driven test of `routeVariables`: + +```go +// endpoints_test.go +func TestMetricsHandler(t *testing.T) { + tt := []struct{ + routeVariable string + shouldPass bool + }{ + {"goroutines", true}, + {"heap", true}, + {"counters", true}, + {"queries", true}, + {"adhadaeqm3k", false}, + } + + for _, tc := range tt { + path := fmt.Sprintf("/metrics/%s", tc.routeVariable) + req, err := http.NewRequest("GET", path, nil) + if err != nil { + t.Fatal(err) + } + + rr := httptest.NewRecorder() + + // Need to create a router that we can pass the request through so that the vars will be added to the context + router := mux.NewRouter() + router.HandleFunc("/metrics/{type}", MetricsHandler) + router.ServeHTTP(rr, req) + + // In this case, our MetricsHandler returns a non-200 response + // for a route variable it doesn't know about. + if rr.Code == http.StatusOK && !tc.shouldPass { + t.Errorf("handler should have failed on routeVariable %s: got %v want %v", + tc.routeVariable, rr.Code, http.StatusOK) + } + } +} +``` ## Full Example diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go index 013f088985b..38957deead3 100644 --- a/vendor/github.com/gorilla/mux/doc.go +++ b/vendor/github.com/gorilla/mux/doc.go @@ -239,8 +239,7 @@ as well: "category", "technology", "id", "42") -Since **vX.Y.Z**, mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed if a -match is found (including subrouters). Middlewares are defined using the de facto standard type: +Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. 
type MiddlewareFunc func(http.Handler) http.Handler @@ -261,7 +260,7 @@ Middlewares can be added to a router using `Router.Use()`: r := mux.NewRouter() r.HandleFunc("/", handler) - r.AddMiddleware(simpleMw) + r.Use(simpleMw) A more complex authentication middleware, which maps session token to users, could be written as: @@ -288,7 +287,7 @@ A more complex authentication middleware, which maps session token to users, cou log.Printf("Authenticated user %s\n", user) next.ServeHTTP(w, r) } else { - http.Error(w, "Forbidden", 403) + http.Error(w, "Forbidden", http.StatusForbidden) } }) } diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go index 8f898675ea7..ceb812cee28 100644 --- a/vendor/github.com/gorilla/mux/middleware.go +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -1,6 +1,9 @@ package mux -import "net/http" +import ( + "net/http" + "strings" +) // MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. // Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed @@ -12,17 +15,58 @@ type middleware interface { Middleware(handler http.Handler) http.Handler } -// MiddlewareFunc also implements the middleware interface. +// Middleware allows MiddlewareFunc to implement the middleware interface. func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { return mw(handler) } // Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. -func (r *Router) Use(mwf MiddlewareFunc) { - r.middlewares = append(r.middlewares, mwf) +func (r *Router) Use(mwf ...MiddlewareFunc) { + for _, fn := range mwf { + r.middlewares = append(r.middlewares, fn) + } } // useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. func (r *Router) useInterface(mw middleware) { r.middlewares = append(r.middlewares, mw) } + +// CORSMethodMiddleware sets the Access-Control-Allow-Methods response header +// on a request, by matching routes based only on paths. It also handles +// OPTIONS requests, by settings Access-Control-Allow-Methods, and then +// returning without calling the next http handler. +func CORSMethodMiddleware(r *Router) MiddlewareFunc { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + var allMethods []string + + err := r.Walk(func(route *Route, _ *Router, _ []*Route) error { + for _, m := range route.matchers { + if _, ok := m.(*routeRegexp); ok { + if m.Match(req, &RouteMatch{}) { + methods, err := route.GetMethods() + if err != nil { + return err + } + + allMethods = append(allMethods, methods...) 
+ } + break + } + } + return nil + }) + + if err == nil { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(append(allMethods, "OPTIONS"), ",")) + + if req.Method == "OPTIONS" { + return + } + } + + next.ServeHTTP(w, req) + }) + } +} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index efabd241751..4bbafa51da3 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -13,8 +13,11 @@ import ( ) var ( + // ErrMethodMismatch is returned when the method in the request does not match + // the method defined against the route. ErrMethodMismatch = errors.New("method is not allowed") - ErrNotFound = errors.New("no matching route was found") + // ErrNotFound is returned when no route match is found. + ErrNotFound = errors.New("no matching route was found") ) // NewRouter returns a new router instance. @@ -95,9 +98,9 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { if r.MethodNotAllowedHandler != nil { match.Handler = r.MethodNotAllowedHandler return true - } else { - return false } + + return false } // Closest match for a router (includes sub-routers) diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go index 4ce098d4fbf..a591d735450 100644 --- a/vendor/github.com/gorilla/mux/route.go +++ b/vendor/github.com/gorilla/mux/route.go @@ -43,6 +43,8 @@ type Route struct { buildVarsFunc BuildVarsFunc } +// SkipClean reports whether path cleaning is enabled for this route via +// Router.SkipClean. func (r *Route) SkipClean() bool { return r.skipClean } @@ -622,7 +624,7 @@ func (r *Route) GetPathRegexp() (string, error) { // route queries. // This is useful for building simple REST API documentation and for instrumentation // against third-party services. -// An empty list will be returned if the route does not have queries. +// An error will be returned if the route does not have queries. func (r *Route) GetQueriesRegexp() ([]string, error) { if r.err != nil { return nil, r.err @@ -641,7 +643,7 @@ func (r *Route) GetQueriesRegexp() ([]string, error) { // query matching. // This is useful for building simple REST API documentation and for instrumentation // against third-party services. -// An empty list will be returned if the route does not define queries. +// An error will be returned if the route does not define queries. func (r *Route) GetQueriesTemplates() ([]string, error) { if r.err != nil { return nil, r.err @@ -659,7 +661,7 @@ func (r *Route) GetQueriesTemplates() ([]string, error) { // GetMethods returns the methods the route matches against // This is useful for building simple REST API documentation and for instrumentation // against third-party services. -// An empty list will be returned if route does not have methods. +// An error will be returned if route does not have methods. 
func (r *Route) GetMethods() ([]string, error) { if r.err != nil { return nil, r.err @@ -669,7 +671,7 @@ func (r *Route) GetMethods() ([]string, error) { return []string(methods), nil } } - return nil, nil + return nil, errors.New("mux: route doesn't have methods") } // GetHostTemplate returns the template used to build the diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go index 8b2c4a4c580..32ecffde489 100644 --- a/vendor/github.com/gorilla/mux/test_helpers.go +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -7,7 +7,8 @@ package mux import "net/http" // SetURLVars sets the URL variables for the given request, to be accessed via -// mux.Vars for testing route behaviour. +// mux.Vars for testing route behaviour. Arguments are not modified, a shallow +// copy is returned. // // This API should only be used for testing purposes; it provides a way to // inject variables into the request context. Alternatively, URL variables diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md index 4a99d31722c..d1cefa87189 100644 --- a/vendor/github.com/imdario/mergo/README.md +++ b/vendor/github.com/imdario/mergo/README.md @@ -8,10 +8,11 @@ Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). -[![Build Status][1]][2] [![GoDoc][3]][4] [![GoCard][5]][6] +[![Build Status][1]][2] [![Coverage Status][7]][8] +[![Sourcegraph][9]][10] [1]: https://travis-ci.org/imdario/mergo.png [2]: https://travis-ci.org/imdario/mergo @@ -21,15 +22,28 @@ It is ready for production use. [It is used in several projects by Docker, Googl [6]: https://goreportcard.com/report/github.com/imdario/mergo [7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master [8]: https://coveralls.io/github/imdario/mergo?branch=master +[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[10]: https://sourcegraph.com/github.com/imdario/mergo?badge ### Latest release -[Release 0.3.1](https://github.com/imdario/mergo/releases/tag/0.3.1) is an important release because it changes `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. +[Release v0.3.4](https://github.com/imdario/mergo/releases/tag/v0.3.4). ### Important note +Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. + If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. 
:heart_eyes: + +Buy Me a Coffee at ko-fi.com +[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) +[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) +Donate using Liberapay + ### Mergo in the wild - [moby/moby](https://github.com/moby/moby) @@ -84,7 +98,7 @@ If you were using Mergo **before** April 6th 2015, please check your project wor ## Usage -You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). ```go if err := mergo.Merge(&dst, src); err != nil { @@ -95,7 +109,7 @@ if err := mergo.Merge(&dst, src); err != nil { Also, you can merge overwriting values using the transformer `WithOverride`. ```go -if err := mergo.Merge(&dst, src, WithOverride); err != nil { +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { // ... } ``` @@ -155,6 +169,7 @@ package main import ( "fmt" + "github.com/imdario/mergo" "reflect" "time" ) @@ -186,7 +201,7 @@ type Snapshot struct { func main() { src := Snapshot{time.Now()} dest := Snapshot{} - mergo.Merge(&dest, src, WithTransformers(timeTransfomer{})) + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) fmt.Println(dest) // Will print // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go index 20981432922..6ea38e636b6 100644 --- a/vendor/github.com/imdario/mergo/map.go +++ b/vendor/github.com/imdario/mergo/map.go @@ -31,8 +31,8 @@ func isExported(field reflect.StructField) bool { // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. -func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) { - overwrite := config.overwrite +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite if dst.CanAddr() { addr := dst.UnsafeAddr() h := 17 * addr @@ -128,23 +128,23 @@ func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, conf // doesn't apply if dst is a map. // This is separated method from Merge because it is cleaner and it keeps sane // semantics: merging equal types, mapping different (restricted) types. -func Map(dst, src interface{}, opts ...func(*config)) error { +func Map(dst, src interface{}, opts ...func(*Config)) error { return _map(dst, src, opts...) } -// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overriden by +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by // non-empty src attribute values. 
// Deprecated: Use Map(…) with WithOverride -func MapWithOverwrite(dst, src interface{}, opts ...func(*config)) error { +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { return _map(dst, src, append(opts, WithOverride)...) } -func _map(dst, src interface{}, opts ...func(*config)) error { +func _map(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value err error ) - config := &config{} + config := &Config{} for _, opt := range opts { opt(config) diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go index 8ca10c91088..f0e17924ac9 100644 --- a/vendor/github.com/imdario/mergo/merge.go +++ b/vendor/github.com/imdario/mergo/merge.go @@ -8,7 +8,9 @@ package mergo -import "reflect" +import ( + "reflect" +) func hasExportedField(dst reflect.Value) (exported bool) { for i, n := 0, dst.NumField(); i < n; i++ { @@ -22,20 +24,21 @@ func hasExportedField(dst reflect.Value) (exported bool) { return } -type config struct { - overwrite bool - transformers transformers +type Config struct { + Overwrite bool + AppendSlice bool + Transformers Transformers } -type transformers interface { +type Transformers interface { Transformer(reflect.Type) func(dst, src reflect.Value) error } // Traverses recursively both values, assigning src's fields values to dst. // The map argument tracks comparisons that have already been seen, which allows // short circuiting on recursive types. -func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *config) (err error) { - overwrite := config.overwrite +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite if !src.IsValid() { return @@ -54,8 +57,8 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co visited[h] = &visit{addr, typ, seen} } - if config.transformers != nil && !isEmptyValue(dst) { - if fn := config.transformers.Transformer(dst.Type()); fn != nil { + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { err = fn(dst, src) return } @@ -75,9 +78,8 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } case reflect.Map: - if len(src.MapKeys()) == 0 && !src.IsNil() && len(dst.MapKeys()) == 0 { + if dst.IsNil() && !src.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) - return } for _, key := range src.MapKeys() { srcElement := src.MapIndex(key) @@ -86,7 +88,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } dstElement := dst.MapIndex(key) switch srcElement.Kind() { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Slice: + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: if srcElement.IsNil() { continue } @@ -122,7 +124,7 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co continue } - if !isEmptyValue(srcElement) && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dst))) { if dst.IsNil() { dst.Set(reflect.MakeMap(dst.Type())) } @@ -130,7 +132,14 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co } } case reflect.Slice: - dst.Set(reflect.AppendSlice(dst, src)) + if !dst.CanSet() { + break + } + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice 
{ + dst.Set(src) + } else { + dst.Set(reflect.AppendSlice(dst, src)) + } case reflect.Ptr: fallthrough case reflect.Interface: @@ -174,36 +183,41 @@ func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, co // src attributes if they themselves are not empty. dst and src must be valid same-type structs // and dst must be a pointer to struct. // It won't merge unexported (private) fields and will do recursively any exported field. -func Merge(dst, src interface{}, opts ...func(*config)) error { +func Merge(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, opts...) } // MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by // non-empty src attribute values. // Deprecated: use Merge(…) with WithOverride -func MergeWithOverwrite(dst, src interface{}, opts ...func(*config)) error { +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { return merge(dst, src, append(opts, WithOverride)...) } // WithTransformers adds transformers to merge, allowing to customize the merging of some types. -func WithTransformers(transformers transformers) func(*config) { - return func(config *config) { - config.transformers = transformers +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers } } // WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. -func WithOverride(config *config) { - config.overwrite = true +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it +func WithAppendSlice(config *Config) { + config.AppendSlice = true } -func merge(dst, src interface{}, opts ...func(*config)) error { +func merge(dst, src interface{}, opts ...func(*Config)) error { var ( vDst, vSrc reflect.Value err error ) - config := &config{} + config := &Config{} for _, opt := range opts { opt(config) diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go index b69cdbfc372..bd66947d7c8 100644 --- a/vendor/github.com/json-iterator/go/config.go +++ b/vendor/github.com/json-iterator/go/config.go @@ -2,11 +2,12 @@ package jsoniter import ( "encoding/json" + "github.com/modern-go/concurrent" "github.com/modern-go/reflect2" "io" + "reflect" "sync" "unsafe" - "github.com/modern-go/concurrent" ) // Config customize how the API should behave. 
@@ -39,6 +40,8 @@ type API interface { NewDecoder(reader io.Reader) *Decoder Valid(data []byte) bool RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder } // ConfigDefault the default API @@ -60,7 +63,6 @@ var ConfigFastest = Config{ ObjectFieldMustBeSimpleString: true, // do not unescape object field }.Froze() - type frozenConfig struct { configBeforeFrozen Config sortMapKeys bool @@ -104,7 +106,7 @@ func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { return nil } -var cfgCache = &sync.Map{} +var cfgCache = concurrent.NewMap() func getFrozenConfigFromCache(cfg Config) *frozenConfig { obj, found := cfgCache.Load(cfg) @@ -192,6 +194,11 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { func (cfg *frozenConfig) useNumber(extension DecoderExtension) { extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } if iter.WhatIsNext() == NumberValue { *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) } else { diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index f34d519f9c7..8812f085016 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -3,6 +3,7 @@ package jsoniter import ( "fmt" "github.com/modern-go/reflect2" + "io" "reflect" "sort" "unsafe" @@ -107,6 +108,9 @@ func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), } } + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} } } @@ -203,6 +207,21 @@ func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { return false } +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + type mapEncoder struct { mapType *reflect2.UnsafeMapType keyEncoder ValEncoder @@ -253,6 +272,9 @@ func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { subStream.buf = make([]byte, 0, 64) key, elem := mapIter.UnsafeNext() encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } encodedKey := subStream.Buffer() subIter.ResetBytes(encodedKey) decodedKey := subIter.ReadString() diff --git a/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/vendor/github.com/mailru/easyjson/jlexer/lexer.go index e5558ae39c9..0fd9b122f09 100644 --- a/vendor/github.com/mailru/easyjson/jlexer/lexer.go +++ b/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -997,6 +997,22 @@ func (r *Lexer) Float32() float32 { return float32(n) } +func (r *Lexer) Float32Str() float32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + n, err := 
strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return float32(n) +} + func (r *Lexer) Float64() float64 { s := r.number() if !r.Ok() { @@ -1014,6 +1030,22 @@ func (r *Lexer) Float64() float64 { return n } +func (r *Lexer) Float64Str() float64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + func (r *Lexer) Error() error { return r.fatalError } @@ -1056,7 +1088,7 @@ func (r *Lexer) JsonNumber() json.Number { } if !r.Ok() { r.errInvalidToken("json.Number") - return json.Number("0") + return json.Number("") } switch r.token.kind { @@ -1064,9 +1096,12 @@ func (r *Lexer) JsonNumber() json.Number { return json.Number(r.String()) case tokenNumber: return json.Number(r.Raw()) + case tokenNull: + r.Null() + return json.Number("") default: r.errSyntax() - return json.Number("0") + return json.Number("") } } diff --git a/vendor/github.com/mailru/easyjson/jwriter/writer.go b/vendor/github.com/mailru/easyjson/jwriter/writer.go index e5a5ddfdbf1..b9ed7ccaa8b 100644 --- a/vendor/github.com/mailru/easyjson/jwriter/writer.go +++ b/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -240,11 +240,25 @@ func (w *Writer) Float32(n float32) { w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) } +func (w *Writer) Float32Str(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + func (w *Writer) Float64(n float64) { w.Buffer.EnsureSpace(20) w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) } +func (w *Writer) Float64Str(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 64) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + func (w *Writer) Bool(v bool) { w.Buffer.EnsureSpace(5) if v { @@ -340,12 +354,11 @@ func (w *Writer) base64(in []byte) { return } - w.Buffer.EnsureSpace(((len(in) - 1) / 3 + 1) * 4) + w.Buffer.EnsureSpace(((len(in)-1)/3 + 1) * 4) si := 0 n := (len(in) / 3) * 3 - for si < n { // Convert 3x 8bit source bytes into 4 bytes val := uint(in[si+0])<<16 | uint(in[si+1])<<8 | uint(in[si+2]) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore new file mode 100644 index 00000000000..e16fb946bb9 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/.gitignore @@ -0,0 +1 @@ +cover.dat diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile new file mode 100644 index 00000000000..81be214370d --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/Makefile @@ -0,0 +1,7 @@ +all: + +cover: + go test -cover -v -coverprofile=cover.dat ./... 
+ go tool cover -func cover.dat + +.PHONY: cover diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index a7a42d5ef41..c092723e84a 100644 --- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -164,9 +164,9 @@ func (sd *SampleDecoder) Decode(s *model.Vector) error { } // ExtractSamples builds a slice of samples from the provided metric -// families. If an error occurs during sample extraction, it continues to +// families. If an error occurrs during sample extraction, it continues to // extract from the remaining metric families. The returned error is the last -// error that has occured. +// error that has occurred. func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { var ( all model.Vector diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go index 54bcfde2946..b86290afa39 100644 --- a/vendor/github.com/prometheus/common/expfmt/text_parse.go +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -556,8 +556,8 @@ func (p *TextParser) readTokenUntilWhitespace() { // byte considered is the byte already read (now in p.currentByte). The first // newline byte encountered is still copied into p.currentByte, but not into // p.currentToken. If recognizeEscapeSequence is true, two escape sequences are -// recognized: '\\' tranlates into '\', and '\n' into a line-feed character. All -// other escape sequences are invalid and cause an error. +// recognized: '\\' translates into '\', and '\n' into a line-feed character. +// All other escape sequences are invalid and cause an error. func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) { p.currentToken.Reset() escaped := false diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go index 7538e299774..bb99889d2cc 100644 --- a/vendor/github.com/prometheus/common/model/silence.go +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -59,8 +59,8 @@ func (m *Matcher) Validate() error { return nil } -// Silence defines the representation of a silence definiton -// in the Prometheus eco-system. +// Silence defines the representation of a silence definition in the Prometheus +// eco-system. type Silence struct { ID uint64 `json:"id,omitempty"` diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go index c9ed3ffd82a..c9d8fb1a283 100644 --- a/vendor/github.com/prometheus/common/model/value.go +++ b/vendor/github.com/prometheus/common/model/value.go @@ -100,7 +100,7 @@ func (s *SamplePair) UnmarshalJSON(b []byte) error { } // Equal returns true if this SamplePair and o have equal Values and equal -// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. +// Timestamps. The semantics of Value equality is defined by SampleValue.Equal. func (s *SamplePair) Equal(o *SamplePair) bool { return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) } @@ -117,7 +117,7 @@ type Sample struct { } // Equal compares first the metrics, then the timestamp, then the value. The -// sematics of value equality is defined by SampleValue.Equal. +// semantics of value equality is defined by SampleValue.Equal. 
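`ExtractSamples`, whose doc comment is touched above, keeps extracting past per-family errors and returns the last error alongside the partial result. A rough usage sketch against the text exposition format; the input string and error handling are illustrative only:

```go
package main

import (
	"fmt"
	"os"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
	"github.com/prometheus/common/model"
)

func main() {
	in := strings.NewReader("# TYPE up gauge\nup 1\n")
	dec := expfmt.NewDecoder(in, expfmt.FmtText)

	var fams []*dto.MetricFamily
	for {
		mf := &dto.MetricFamily{}
		if err := dec.Decode(mf); err != nil {
			break // io.EOF once the input is exhausted
		}
		fams = append(fams, mf)
	}

	// ExtractSamples continues past per-family extraction errors and
	// returns the last one it saw together with whatever it extracted.
	samples, err := expfmt.ExtractSamples(&expfmt.DecodeOptions{Timestamp: model.Now()}, fams...)
	if err != nil {
		fmt.Fprintln(os.Stderr, "extract:", err)
	}
	fmt.Println(samples)
}
```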
func (s *Sample) Equal(o *Sample) bool { if s == o { return true diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go index 6c17affe8ab..3f2523371ab 100644 --- a/vendor/github.com/prometheus/procfs/net_dev.go +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -184,7 +184,7 @@ func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { } // Total aggregates the values across interfaces and returns a new NetDevLine. -// The Name field will be a sorted comma seperated list of interface names. +// The Name field will be a sorted comma separated list of interface names. func (nd NetDev) Total() NetDevLine { total := NetDevLine{} diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go b/vendor/github.com/prometheus/procfs/nfs/nfs.go index e2185b7827a..651bf681952 100644 --- a/vendor/github.com/prometheus/procfs/nfs/nfs.go +++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go @@ -11,7 +11,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package nfsd implements parsing of /proc/net/rpc/nfsd. +// Package nfs implements parsing of /proc/net/rpc/nfsd. // Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ package nfs @@ -136,8 +136,8 @@ type ClientV4Stats struct { Setattr uint64 FsInfo uint64 Renew uint64 - SetClientId uint64 - SetClientIdConfirm uint64 + SetClientID uint64 + SetClientIDConfirm uint64 Lock uint64 Lockt uint64 Locku uint64 @@ -156,13 +156,13 @@ type ClientV4Stats struct { ReadDir uint64 ServerCaps uint64 DelegReturn uint64 - GetAcl uint64 - SetAcl uint64 + GetACL uint64 + SetACL uint64 FsLocations uint64 ReleaseLockowner uint64 Secinfo uint64 FsidPresent uint64 - ExchangeId uint64 + ExchangeID uint64 CreateSession uint64 DestroySession uint64 Sequence uint64 @@ -173,11 +173,11 @@ type ClientV4Stats struct { LayoutCommit uint64 LayoutReturn uint64 SecinfoNoName uint64 - TestStateId uint64 - FreeStateId uint64 + TestStateID uint64 + FreeStateID uint64 GetDeviceList uint64 BindConnToSession uint64 - DestroyClientId uint64 + DestroyClientID uint64 Seek uint64 Allocate uint64 DeAllocate uint64 @@ -238,7 +238,7 @@ type V4Ops struct { RelLockOwner uint64 } -// RPCStats models all stats from /proc/net/rpc/nfs. +// ClientRPCStats models all stats from /proc/net/rpc/nfs. 
type ClientRPCStats struct { Network Network ClientRPC ClientRPC diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go index 8f568f0116d..95a83cc5bc5 100644 --- a/vendor/github.com/prometheus/procfs/nfs/parse.go +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -204,8 +204,8 @@ func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { Setattr: v[10], FsInfo: v[11], Renew: v[12], - SetClientId: v[13], - SetClientIdConfirm: v[14], + SetClientID: v[13], + SetClientIDConfirm: v[14], Lock: v[15], Lockt: v[16], Locku: v[17], @@ -224,13 +224,13 @@ func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { ReadDir: v[30], ServerCaps: v[31], DelegReturn: v[32], - GetAcl: v[33], - SetAcl: v[34], + GetACL: v[33], + SetACL: v[34], FsLocations: v[35], ReleaseLockowner: v[36], Secinfo: v[37], FsidPresent: v[38], - ExchangeId: v[39], + ExchangeID: v[39], CreateSession: v[40], DestroySession: v[41], Sequence: v[42], @@ -241,11 +241,11 @@ func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { LayoutCommit: v[47], LayoutReturn: v[48], SecinfoNoName: v[49], - TestStateId: v[50], - FreeStateId: v[51], + TestStateID: v[50], + FreeStateID: v[51], GetDeviceList: v[52], BindConnToSession: v[53], - DestroyClientId: v[54], + DestroyClientID: v[54], Seek: v[55], Allocate: v[56], DeAllocate: v[57], diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar index b514e731222..b0171a12b59 100755 --- a/vendor/github.com/prometheus/procfs/ttar +++ b/vendor/github.com/prometheus/procfs/ttar @@ -229,13 +229,13 @@ function extract { # The repeated pattern makes up for sed's lack of negative # lookbehind assertions (for consecutive null bytes). echo -n "$line" | \ - sed 's/^NULLBYTE/\x0/g' | \ - sed 's/\([^\\]\)NULLBYTE/\1\x0/g' | \ - sed 's/\([^\\]\)NULLBYTE/\1\x0/g' | \ - sed 's/\\NULLBYTE/NULLBYTE/g' | \ - sed 's/\([^\\]\)EOF/\1/g' | \ - sed 's/\\EOF/EOF/g' \ - >> "$path" + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" fi if [[ "$eof_without_newline" -eq 0 ]]; then echo >> "$path" @@ -283,11 +283,14 @@ function get_mode { local mfile=$1 if [ -z "${STAT_OPTION:-}" ]; then if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat STAT_OPTION='-c' STAT_FORMAT='%a' else + # BSD stat STAT_OPTION='-f' - STAT_FORMAT='%A' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' fi fi stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" @@ -334,9 +337,10 @@ function _create { < "$file" python -c "$PYTHON_CREATE_FILTER" else < "$file" \ - sed 's/EOF/\\EOF/g' | \ - sed 's/NULLBYTE/\\NULLBYTE/g' | \ - sed 's/\x0/NULLBYTE/g' + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' fi if [[ "$eof_without_newline" -eq 1 ]]; then # Finish line with EOF to indicate that the original line did diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index cc58f6451fa..1bd1deb2947 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,8 @@ +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + # 1.0.4 * Fix race when adding hooks (#612) diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 08584b5fc24..f77819b1686 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ 
b/vendor/github.com/sirupsen/logrus/README.md @@ -220,7 +220,7 @@ Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in ```go import ( log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" ) @@ -247,6 +247,7 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v | [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | | [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | | [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | +| [Application Insights](https://github.com/jjcollinge/logrus-appinsights) | Hook for logging to [Application Insights](https://azure.microsoft.com/en-us/services/application-insights/) | [AzureTableHook](https://github.com/kpfaulkner/azuretablehook/) | Hook for logging to Azure Table Storage| | [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | | [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | @@ -262,6 +263,7 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v | [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | | [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | | [KafkaLogrus](https://github.com/tracer0tong/kafkalogrus) | Hook for logging to Kafka | +| [Kafka REST Proxy](https://github.com/Nordstrom/logrus-kafka-rest-proxy) | Hook for logging to [Kafka REST Proxy](https://docs.confluent.io/current/kafka-rest/docs) | | [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | | [Logbeat](https://github.com/macandmia/logbeat) | Hook for logging to [Opbeat](https://opbeat.com/) | | [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 1fad45e0823..778f4c9f0d3 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -94,38 +94,47 @@ func (entry Entry) log(level Level, msg string) { entry.Level = level entry.Message = msg - entry.Logger.mu.Lock() - err := entry.Logger.Hooks.Fire(level, &entry) - entry.Logger.mu.Unlock() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } + entry.fireHooks() + buffer = bufferPool.Get().(*bytes.Buffer) buffer.Reset() defer bufferPool.Put(buffer) entry.Buffer = buffer - serialized, err := entry.Logger.Formatter.Format(&entry) + + entry.write() + entry.Buffer = nil + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. 
+ if level <= PanicLevel { + panic(&entry) + } +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, &entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + } +} + +func (entry *Entry) write() { + serialized, err := entry.Logger.Formatter.Format(entry) + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() if err != nil { - entry.Logger.mu.Lock() fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() } else { - entry.Logger.mu.Lock() _, err = entry.Logger.Out.Write(serialized) if err != nil { fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) } - entry.Logger.mu.Unlock() - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(&entry) } } diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go index d7b3893f3fe..4880d13d26d 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_bsd.go +++ b/vendor/github.com/sirupsen/logrus/terminal_bsd.go @@ -1,5 +1,5 @@ // +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine +// +build !appengine,!gopherjs package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go index 2403de98192..3de08e802fd 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -1,4 +1,4 @@ -// +build appengine +// +build appengine gopherjs package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go index 116bcb4e339..067047a1233 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -1,4 +1,4 @@ -// +build !appengine +// +build !appengine,!gopherjs package logrus diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go index 88d7298e24f..f29a0097c81 100644 --- a/vendor/github.com/sirupsen/logrus/terminal_linux.go +++ b/vendor/github.com/sirupsen/logrus/terminal_linux.go @@ -3,7 +3,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
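The entry.go refactor above splits hook firing (`fireHooks`) from formatting and writing (`write`) so hooks run under the logger mutex without racing the writer. A custom hook still only needs the two-method `Hook` interface; the hook type below is a made-up illustration:

```go
package main

import (
	"fmt"

	log "github.com/sirupsen/logrus"
)

// stderrHook is a hypothetical hook; only Levels and Fire are required.
type stderrHook struct{}

func (stderrHook) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

// Fire runs while the logger mutex is held (see fireHooks above),
// so it must not log back into the same logger.
func (stderrHook) Fire(e *log.Entry) error {
	fmt.Printf("hooked: %s %s\n", e.Level, e.Message)
	return nil
}

func main() {
	log.AddHook(stderrHook{})
	log.WithField("component", "demo").Error("something went wrong")
}
```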
-// +build !appengine +// +build !appengine,!gopherjs package logrus diff --git a/vendor/github.com/spf13/cobra/.travis.yml b/vendor/github.com/spf13/cobra/.travis.yml index 68efa136331..5afcb209619 100644 --- a/vendor/github.com/spf13/cobra/.travis.yml +++ b/vendor/github.com/spf13/cobra/.travis.yml @@ -2,8 +2,8 @@ language: go matrix: include: - - go: 1.7.6 - - go: 1.8.3 + - go: 1.9.4 + - go: 1.10.0 - go: tip allow_failures: - go: tip diff --git a/vendor/github.com/spf13/cobra/README.md b/vendor/github.com/spf13/cobra/README.md index 373a056ba19..851fcc087ca 100644 --- a/vendor/github.com/spf13/cobra/README.md +++ b/vendor/github.com/spf13/cobra/README.md @@ -20,6 +20,7 @@ Many of the most widely used Go projects are built using Cobra including: * [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) * [rclone](http://rclone.org/) * [nehm](https://github.com/bogem/nehm) +* [Pouch](https://github.com/alibaba/pouch) [![Build Status](https://travis-ci.org/spf13/cobra.svg "Travis CI status")](https://travis-ci.org/spf13/cobra) [![CircleCI status](https://circleci.com/gh/spf13/cobra.png?circle-token=:circle-token "CircleCI status")](https://circleci.com/gh/spf13/cobra) @@ -158,10 +159,7 @@ import ( ) func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } + cmd.Execute() } ``` @@ -174,7 +172,7 @@ commands you want. It's the easiest way to incorporate Cobra into your applicati ## Using the Cobra Library -To manually implement Cobra you need to create a bare main.go file and a RootCmd file. +To manually implement Cobra you need to create a bare main.go file and a rootCmd file. You will optionally provide additional commands as you see fit. ### Create rootCmd @@ -184,7 +182,7 @@ Cobra doesn't require any special constructors. Simply create your commands. Ideally you place this in app/cmd/root.go: ```go -var RootCmd = &cobra.Command{ +var rootCmd = &cobra.Command{ Use: "hugo", Short: "Hugo is a very fast static site generator", Long: `A Fast and Flexible Static Site Generator built with @@ -194,6 +192,13 @@ var RootCmd = &cobra.Command{ // Do Stuff Here }, } + +func Execute() { + if err := rootCmd.Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} ``` You will additionally define flags and handle configuration in your init() function. @@ -212,22 +217,18 @@ import ( func init() { cobra.OnInitialize(initConfig) - RootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") - RootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. github.com/spf13/") - RootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") - RootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") - RootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") - viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) - viper.BindPFlag("projectbase", RootCmd.PersistentFlags().Lookup("projectbase")) - viper.BindPFlag("useViper", RootCmd.PersistentFlags().Lookup("viper")) + rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.cobra.yaml)") + rootCmd.PersistentFlags().StringVarP(&projectBase, "projectbase", "b", "", "base project directory eg. 
github.com/spf13/") + rootCmd.PersistentFlags().StringP("author", "a", "YOUR NAME", "Author name for copyright attribution") + rootCmd.PersistentFlags().StringVarP(&userLicense, "license", "l", "", "Name of license for the project (can provide `licensetext` in config)") + rootCmd.PersistentFlags().Bool("viper", true, "Use Viper for configuration") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) + viper.BindPFlag("projectbase", rootCmd.PersistentFlags().Lookup("projectbase")) + viper.BindPFlag("useViper", rootCmd.PersistentFlags().Lookup("viper")) viper.SetDefault("author", "NAME HERE ") viper.SetDefault("license", "apache") } -func Execute() { - RootCmd.Execute() -} - func initConfig() { // Don't forget to read config either from cfgFile or from home directory! if cfgFile != "" { @@ -271,10 +272,7 @@ import ( ) func main() { - if err := cmd.RootCmd.Execute(); err != nil { - fmt.Println(err) - os.Exit(1) - } + cmd.Execute() } ``` @@ -290,12 +288,13 @@ populate it with the following: package cmd import ( - "github.com/spf13/cobra" "fmt" + + "github.com/spf13/cobra" ) func init() { - RootCmd.AddCommand(versionCmd) + rootCmd.AddCommand(versionCmd) } var versionCmd = &cobra.Command{ @@ -332,7 +331,7 @@ command it's assigned to as well as every command under that command. For global flags, assign a flag as a persistent flag on the root. ```go -RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") +rootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose output") ``` ### Local Flags @@ -340,13 +339,13 @@ RootCmd.PersistentFlags().BoolVarP(&Verbose, "verbose", "v", false, "verbose out A flag can also be assigned locally which will only apply to that specific command. ```go -RootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") +rootCmd.Flags().StringVarP(&Source, "source", "s", "", "Source directory to read from") ``` ### Local Flag on Parent Commands -By default Cobra only parses local flags on the target command, any local flags on -parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will +By default Cobra only parses local flags on the target command, any local flags on +parent commands are ignored. By enabling `Command.TraverseChildren` Cobra will parse local flags on each command before executing the target command. ```go @@ -363,8 +362,8 @@ You can also bind your flags with [viper](https://github.com/spf13/viper): var author string func init() { - RootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") - viper.BindPFlag("author", RootCmd.PersistentFlags().Lookup("author")) + rootCmd.PersistentFlags().StringVar(&author, "author", "YOUR NAME", "Author name for copyright attribution") + viper.BindPFlag("author", rootCmd.PersistentFlags().Lookup("author")) } ``` @@ -374,6 +373,15 @@ when the `--author` flag is not provided by user. More in [viper documentation](https://github.com/spf13/viper#working-with-flags). +### Required flags + +Flags are optional by default. If instead you wish your command to report an error +when a flag has not been set, mark it as required: +```go +rootCmd.Flags().StringVarP(&Region, "region", "r", "", "AWS region (required)") +rootCmd.MarkFlagRequired("region") +``` + ## Positional and Custom Arguments Validation of positional arguments can be specified using the `Args` field @@ -522,7 +530,7 @@ around it. In fact, you can provide your own if you want. 
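Pulling together the README changes above (an unexported `rootCmd`, an exported `Execute` wrapper, and the new required-flag section), a condensed sketch of the recommended layout might look like this; the command and flag names are placeholders:

```go
package cmd

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

var region string

var rootCmd = &cobra.Command{
	Use:   "app",
	Short: "app is a hypothetical CLI following the v0.0.3 README conventions",
	Args:  cobra.MinimumNArgs(1), // positional-argument validation from args.go
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Println("running in", region, "with", args)
	},
}

// Execute is the exported entry point that main() calls, as in the README.
func Execute() {
	if err := rootCmd.Execute(); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}
}

func init() {
	rootCmd.Flags().StringVarP(&region, "region", "r", "", "region to act on (required)")
	rootCmd.MarkFlagRequired("region") // command errors out if --region is missing
}
```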
### Defining your own help You can provide your own Help command or your own template for the default command to use -with followind functions: +with following functions: ```go cmd.SetHelpCommand(cmd *Command) @@ -569,6 +577,13 @@ cmd.SetUsageFunc(f func(*Command) error) cmd.SetUsageTemplate(s string) ``` +## Version Flag + +Cobra adds a top-level '--version' flag if the Version field is set on the root command. +Running an application with the '--version' flag will print the version to stdout using +the version template. The template can be customized using the +`cmd.SetVersionTemplate(s string)` function. + ## PreRun and PostRun Hooks It is possible to run functions before or after the main `Run` function of your command. The `PersistentPreRun` and `PreRun` functions will be executed before `Run`. `PersistentPostRun` and `PostRun` will be executed after `Run`. The `Persistent*Run` functions will be inherited by children if they do not declare their own. These functions are run in the following order: diff --git a/vendor/github.com/spf13/cobra/args.go b/vendor/github.com/spf13/cobra/args.go index 94a6ca2737e..a5d8a9273ea 100644 --- a/vendor/github.com/spf13/cobra/args.go +++ b/vendor/github.com/spf13/cobra/args.go @@ -16,14 +16,14 @@ func legacyArgs(cmd *Command, args []string) error { return nil } - // root command with subcommands, do subcommand checking + // root command with subcommands, do subcommand checking. if !cmd.HasParent() && len(args) > 0 { return fmt.Errorf("unknown command %q for %q%s", args[0], cmd.CommandPath(), cmd.findSuggestions(args[0])) } return nil } -// NoArgs returns an error if any args are included +// NoArgs returns an error if any args are included. func NoArgs(cmd *Command, args []string) error { if len(args) > 0 { return fmt.Errorf("unknown command %q for %q", args[0], cmd.CommandPath()) @@ -31,7 +31,7 @@ func NoArgs(cmd *Command, args []string) error { return nil } -// OnlyValidArgs returns an error if any args are not in the list of ValidArgs +// OnlyValidArgs returns an error if any args are not in the list of ValidArgs. func OnlyValidArgs(cmd *Command, args []string) error { if len(cmd.ValidArgs) > 0 { for _, v := range args { @@ -43,21 +43,12 @@ func OnlyValidArgs(cmd *Command, args []string) error { return nil } -func stringInSlice(a string, list []string) bool { - for _, b := range list { - if b == a { - return true - } - } - return false -} - -// ArbitraryArgs never returns an error +// ArbitraryArgs never returns an error. func ArbitraryArgs(cmd *Command, args []string) error { return nil } -// MinimumNArgs returns an error if there is not at least N args +// MinimumNArgs returns an error if there is not at least N args. func MinimumNArgs(n int) PositionalArgs { return func(cmd *Command, args []string) error { if len(args) < n { @@ -67,7 +58,7 @@ func MinimumNArgs(n int) PositionalArgs { } } -// MaximumNArgs returns an error if there are more than N args +// MaximumNArgs returns an error if there are more than N args. func MaximumNArgs(n int) PositionalArgs { return func(cmd *Command, args []string) error { if len(args) > n { @@ -77,7 +68,7 @@ func MaximumNArgs(n int) PositionalArgs { } } -// ExactArgs returns an error if there are not exactly n args +// ExactArgs returns an error if there are not exactly n args. 
func ExactArgs(n int) PositionalArgs { return func(cmd *Command, args []string) error { if len(args) != n { @@ -87,7 +78,7 @@ func ExactArgs(n int) PositionalArgs { } } -// RangeArgs returns an error if the number of args is not within the expected range +// RangeArgs returns an error if the number of args is not within the expected range. func RangeArgs(min int, max int) PositionalArgs { return func(cmd *Command, args []string) error { if len(args) < min || len(args) > max { diff --git a/vendor/github.com/spf13/cobra/bash_completions.go b/vendor/github.com/spf13/cobra/bash_completions.go index c19fe7a068b..8fa8f486fac 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.go +++ b/vendor/github.com/spf13/cobra/bash_completions.go @@ -21,8 +21,8 @@ const ( func writePreamble(buf *bytes.Buffer, name string) { buf.WriteString(fmt.Sprintf("# bash completion for %-36s -*- shell-script -*-\n", name)) - buf.WriteString(` -__debug() + buf.WriteString(fmt.Sprintf(` +__%[1]s_debug() { if [[ -n ${BASH_COMP_DEBUG_FILE} ]]; then echo "$*" >> "${BASH_COMP_DEBUG_FILE}" @@ -31,13 +31,13 @@ __debug() # Homebrew on Macs have version 1.3 of bash-completion which doesn't include # _init_completion. This is a very minimal version of that function. -__my_init_completion() +__%[1]s_init_completion() { COMPREPLY=() _get_comp_words_by_ref "$@" cur prev words cword } -__index_of_word() +__%[1]s_index_of_word() { local w word=$1 shift @@ -49,7 +49,7 @@ __index_of_word() index=-1 } -__contains_word() +__%[1]s_contains_word() { local w word=$1; shift for w in "$@"; do @@ -58,9 +58,9 @@ __contains_word() return 1 } -__handle_reply() +__%[1]s_handle_reply() { - __debug "${FUNCNAME[0]}" + __%[1]s_debug "${FUNCNAME[0]}" case $cur in -*) if [[ $(type -t compopt) = "builtin" ]]; then @@ -85,7 +85,7 @@ __handle_reply() local index flag flag="${cur%%=*}" - __index_of_word "${flag}" "${flags_with_completion[@]}" + __%[1]s_index_of_word "${flag}" "${flags_with_completion[@]}" COMPREPLY=() if [[ ${index} -ge 0 ]]; then PREFIX="" @@ -103,7 +103,7 @@ __handle_reply() # check if we are handling a flag with special work handling local index - __index_of_word "${prev}" "${flags_with_completion[@]}" + __%[1]s_index_of_word "${prev}" "${flags_with_completion[@]}" if [[ ${index} -ge 0 ]]; then ${flags_completion[${index}]} return @@ -136,24 +136,30 @@ __handle_reply() if declare -F __ltrim_colon_completions >/dev/null; then __ltrim_colon_completions "$cur" fi + + # If there is only 1 completion and it is a flag with an = it will be completed + # but we don't want a space after the = + if [[ "${#COMPREPLY[@]}" -eq "1" ]] && [[ $(type -t compopt) = "builtin" ]] && [[ "${COMPREPLY[0]}" == --*= ]]; then + compopt -o nospace + fi } # The arguments should be in the form "ext1|ext2|extn" -__handle_filename_extension_flag() +__%[1]s_handle_filename_extension_flag() { local ext="$1" _filedir "@(${ext})" } -__handle_subdirs_in_dir_flag() +__%[1]s_handle_subdirs_in_dir_flag() { local dir="$1" pushd "${dir}" >/dev/null 2>&1 && _filedir -d && popd >/dev/null 2>&1 } -__handle_flag() +__%[1]s_handle_flag() { - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" # if a command required a flag, and we found it, unset must_have_one_flag() local flagname=${words[c]} @@ -164,27 +170,30 @@ __handle_flag() flagname=${flagname%%=*} # strip everything after the = flagname="${flagname}=" # but put the = back fi - __debug "${FUNCNAME[0]}: looking for ${flagname}" - if __contains_word 
"${flagname}" "${must_have_one_flag[@]}"; then + __%[1]s_debug "${FUNCNAME[0]}: looking for ${flagname}" + if __%[1]s_contains_word "${flagname}" "${must_have_one_flag[@]}"; then must_have_one_flag=() fi # if you set a flag which only applies to this command, don't show subcommands - if __contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then + if __%[1]s_contains_word "${flagname}" "${local_nonpersistent_flags[@]}"; then commands=() fi # keep flag value with flagname as flaghash - if [ -n "${flagvalue}" ] ; then - flaghash[${flagname}]=${flagvalue} - elif [ -n "${words[ $((c+1)) ]}" ] ; then - flaghash[${flagname}]=${words[ $((c+1)) ]} - else - flaghash[${flagname}]="true" # pad "true" for bool flag + # flaghash variable is an associative array which is only supported in bash > 3. + if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + if [ -n "${flagvalue}" ] ; then + flaghash[${flagname}]=${flagvalue} + elif [ -n "${words[ $((c+1)) ]}" ] ; then + flaghash[${flagname}]=${words[ $((c+1)) ]} + else + flaghash[${flagname}]="true" # pad "true" for bool flag + fi fi # skip the argument to a two word flag - if __contains_word "${words[c]}" "${two_word_flags[@]}"; then + if __%[1]s_contains_word "${words[c]}" "${two_word_flags[@]}"; then c=$((c+1)) # if we are looking for a flags value, don't show commands if [[ $c -eq $cword ]]; then @@ -196,13 +205,13 @@ __handle_flag() } -__handle_noun() +__%[1]s_handle_noun() { - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" - if __contains_word "${words[c]}" "${must_have_one_noun[@]}"; then + if __%[1]s_contains_word "${words[c]}" "${must_have_one_noun[@]}"; then must_have_one_noun=() - elif __contains_word "${words[c]}" "${noun_aliases[@]}"; then + elif __%[1]s_contains_word "${words[c]}" "${noun_aliases[@]}"; then must_have_one_noun=() fi @@ -210,45 +219,53 @@ __handle_noun() c=$((c+1)) } -__handle_command() +__%[1]s_handle_command() { - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" local next_command if [[ -n ${last_command} ]]; then next_command="_${last_command}_${words[c]//:/__}" else if [[ $c -eq 0 ]]; then - next_command="_$(basename "${words[c]//:/__}")" + next_command="_%[1]s_root_command" else next_command="_${words[c]//:/__}" fi fi c=$((c+1)) - __debug "${FUNCNAME[0]}: looking for ${next_command}" + __%[1]s_debug "${FUNCNAME[0]}: looking for ${next_command}" declare -F "$next_command" >/dev/null && $next_command } -__handle_word() +__%[1]s_handle_word() { if [[ $c -ge $cword ]]; then - __handle_reply + __%[1]s_handle_reply return fi - __debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" + __%[1]s_debug "${FUNCNAME[0]}: c is $c words[c] is ${words[c]}" if [[ "${words[c]}" == -* ]]; then - __handle_flag - elif __contains_word "${words[c]}" "${commands[@]}"; then - __handle_command - elif [[ $c -eq 0 ]] && __contains_word "$(basename "${words[c]}")" "${commands[@]}"; then - __handle_command + __%[1]s_handle_flag + elif __%[1]s_contains_word "${words[c]}" "${commands[@]}"; then + __%[1]s_handle_command + elif [[ $c -eq 0 ]]; then + __%[1]s_handle_command + elif __%[1]s_contains_word "${words[c]}" "${command_aliases[@]}"; then + # aliashash variable is an associative array which is only supported in bash > 3. 
+ if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then + words[c]=${aliashash[${words[c]}]} + __%[1]s_handle_command + else + __%[1]s_handle_noun + fi else - __handle_noun + __%[1]s_handle_noun fi - __handle_word + __%[1]s_handle_word } -`) +`, name)) } func writePostscript(buf *bytes.Buffer, name string) { @@ -257,10 +274,11 @@ func writePostscript(buf *bytes.Buffer, name string) { buf.WriteString(fmt.Sprintf(`{ local cur prev words cword declare -A flaghash 2>/dev/null || : + declare -A aliashash 2>/dev/null || : if declare -F _init_completion >/dev/null 2>&1; then _init_completion -s || return else - __my_init_completion -n "=" || return + __%[1]s_init_completion -n "=" || return fi local c=0 @@ -269,13 +287,13 @@ func writePostscript(buf *bytes.Buffer, name string) { local local_nonpersistent_flags=() local flags_with_completion=() local flags_completion=() - local commands=("%s") + local commands=("%[1]s") local must_have_one_flag=() local must_have_one_noun=() local last_command local nouns=() - __handle_word + __%[1]s_handle_word } `, name)) @@ -296,11 +314,12 @@ func writeCommands(buf *bytes.Buffer, cmd *Command) { continue } buf.WriteString(fmt.Sprintf(" commands+=(%q)\n", c.Name())) + writeCmdAliases(buf, c) } buf.WriteString("\n") } -func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string) { +func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]string, cmd *Command) { for key, value := range annotations { switch key { case BashCompFilenameExt: @@ -308,7 +327,7 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s var ext string if len(value) > 0 { - ext = "__handle_filename_extension_flag " + strings.Join(value, "|") + ext = fmt.Sprintf("__%s_handle_filename_extension_flag ", cmd.Root().Name()) + strings.Join(value, "|") } else { ext = "_filedir" } @@ -326,7 +345,7 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s var ext string if len(value) == 1 { - ext = "__handle_subdirs_in_dir_flag " + value[0] + ext = fmt.Sprintf("__%s_handle_subdirs_in_dir_flag ", cmd.Root().Name()) + value[0] } else { ext = "_filedir -d" } @@ -335,7 +354,7 @@ func writeFlagHandler(buf *bytes.Buffer, name string, annotations map[string][]s } } -func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag) { +func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { name := flag.Shorthand format := " " if len(flag.NoOptDefVal) == 0 { @@ -343,10 +362,10 @@ func writeShortFlag(buf *bytes.Buffer, flag *pflag.Flag) { } format += "flags+=(\"-%s\")\n" buf.WriteString(fmt.Sprintf(format, name)) - writeFlagHandler(buf, "-"+name, flag.Annotations) + writeFlagHandler(buf, "-"+name, flag.Annotations, cmd) } -func writeFlag(buf *bytes.Buffer, flag *pflag.Flag) { +func writeFlag(buf *bytes.Buffer, flag *pflag.Flag, cmd *Command) { name := flag.Name format := " flags+=(\"--%s" if len(flag.NoOptDefVal) == 0 { @@ -354,7 +373,7 @@ func writeFlag(buf *bytes.Buffer, flag *pflag.Flag) { } format += "\")\n" buf.WriteString(fmt.Sprintf(format, name)) - writeFlagHandler(buf, "--"+name, flag.Annotations) + writeFlagHandler(buf, "--"+name, flag.Annotations, cmd) } func writeLocalNonPersistentFlag(buf *bytes.Buffer, flag *pflag.Flag) { @@ -380,9 +399,9 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) { if nonCompletableFlag(flag) { return } - writeFlag(buf, flag) + writeFlag(buf, flag, cmd) if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag) + writeShortFlag(buf, flag, cmd) } if 
localNonPersistentFlags.Lookup(flag.Name) != nil { writeLocalNonPersistentFlag(buf, flag) @@ -392,9 +411,9 @@ func writeFlags(buf *bytes.Buffer, cmd *Command) { if nonCompletableFlag(flag) { return } - writeFlag(buf, flag) + writeFlag(buf, flag, cmd) if len(flag.Shorthand) > 0 { - writeShortFlag(buf, flag) + writeShortFlag(buf, flag, cmd) } }) @@ -434,6 +453,21 @@ func writeRequiredNouns(buf *bytes.Buffer, cmd *Command) { } } +func writeCmdAliases(buf *bytes.Buffer, cmd *Command) { + if len(cmd.Aliases) == 0 { + return + } + + sort.Sort(sort.StringSlice(cmd.Aliases)) + + buf.WriteString(fmt.Sprint(` if [[ -z "${BASH_VERSION}" || "${BASH_VERSINFO[0]}" -gt 3 ]]; then`, "\n")) + for _, value := range cmd.Aliases { + buf.WriteString(fmt.Sprintf(" command_aliases+=(%q)\n", value)) + buf.WriteString(fmt.Sprintf(" aliashash[%q]=%q\n", value, cmd.Name())) + } + buf.WriteString(` fi`) + buf.WriteString("\n") +} func writeArgAliases(buf *bytes.Buffer, cmd *Command) { buf.WriteString(" noun_aliases=()\n") sort.Sort(sort.StringSlice(cmd.ArgAliases)) @@ -452,8 +486,18 @@ func gen(buf *bytes.Buffer, cmd *Command) { commandName := cmd.CommandPath() commandName = strings.Replace(commandName, " ", "_", -1) commandName = strings.Replace(commandName, ":", "__", -1) - buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + + if cmd.Root() == cmd { + buf.WriteString(fmt.Sprintf("_%s_root_command()\n{\n", commandName)) + } else { + buf.WriteString(fmt.Sprintf("_%s()\n{\n", commandName)) + } + buf.WriteString(fmt.Sprintf(" last_command=%q\n", commandName)) + buf.WriteString("\n") + buf.WriteString(" command_aliases=()\n") + buf.WriteString("\n") + writeCommands(buf, cmd) writeFlags(buf, cmd) writeRequiredFlag(buf, cmd) @@ -491,17 +535,20 @@ func (c *Command) GenBashCompletionFile(filename string) error { return c.GenBashCompletion(outFile) } -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag, if it exists. +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. func (c *Command) MarkFlagRequired(name string) error { return MarkFlagRequired(c.Flags(), name) } -// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag, if it exists. +// MarkPersistentFlagRequired adds the BashCompOneRequiredFlag annotation to the named persistent flag if it exists, +// and causes your command to report an error if invoked without the flag. func (c *Command) MarkPersistentFlagRequired(name string) error { return MarkFlagRequired(c.PersistentFlags(), name) } -// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag in the flag set, if it exists. +// MarkFlagRequired adds the BashCompOneRequiredFlag annotation to the named flag if it exists, +// and causes your command to report an error if invoked without the flag. func MarkFlagRequired(flags *pflag.FlagSet, name string) error { return flags.SetAnnotation(name, BashCompOneRequiredFlag, []string{"true"}) } diff --git a/vendor/github.com/spf13/cobra/bash_completions.md b/vendor/github.com/spf13/cobra/bash_completions.md index 52bd39ddb1d..e79d4769d10 100644 --- a/vendor/github.com/spf13/cobra/bash_completions.md +++ b/vendor/github.com/spf13/cobra/bash_completions.md @@ -6,15 +6,16 @@ Generating bash completions from a cobra command is incredibly easy. 
An actual p package main import ( - "io/ioutil" - "os" + "io/ioutil" + "os" - "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd" + "k8s.io/kubernetes/pkg/kubectl/cmd" + "k8s.io/kubernetes/pkg/kubectl/cmd/util" ) func main() { - kubectl := cmd.NewFactory(nil).NewKubectlCommand(os.Stdin, ioutil.Discard, ioutil.Discard) - kubectl.GenBashCompletionFile("out.sh") + kubectl := cmd.NewKubectlCommand(util.NewFactory(nil), os.Stdin, ioutil.Discard, ioutil.Discard) + kubectl.GenBashCompletionFile("out.sh") } ``` @@ -173,14 +174,14 @@ hello.yml test.json So while there are many other files in the CWD it only shows me subdirs and those with valid extensions. -# Specifiy custom flag completion +# Specify custom flag completion -Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specifiy +Similar to the filename completion and filtering using cobra.BashCompFilenameExt, you can specify a custom flag completion function with cobra.BashCompCustom: ```go annotation := make(map[string][]string) - annotation[cobra.BashCompFilenameExt] = []string{"__kubectl_get_namespaces"} + annotation[cobra.BashCompCustom] = []string{"__kubectl_get_namespaces"} flag := &pflag.Flag{ Name: "namespace", @@ -204,3 +205,17 @@ __kubectl_get_namespaces() fi } ``` +# Using bash aliases for commands + +You can also configure the `bash aliases` for the commands and they will also support completions. + +```bash +alias aliasname=origcommand +complete -o default -F __start_origcommand aliasname + +# and now when you run `aliasname` completion will make +# suggestions as it did for `origcommand`. + +$) aliasname +completion firstcommand secondcommand +``` diff --git a/vendor/github.com/spf13/cobra/cobra.go b/vendor/github.com/spf13/cobra/cobra.go index 8928cefc2fa..7010fd15b72 100644 --- a/vendor/github.com/spf13/cobra/cobra.go +++ b/vendor/github.com/spf13/cobra/cobra.go @@ -70,7 +70,8 @@ func AddTemplateFuncs(tmplFuncs template.FuncMap) { } } -// OnInitialize takes a series of func() arguments and appends them to a slice of func(). +// OnInitialize sets the passed functions to be run when each command's +// Execute method is called. func OnInitialize(y ...func()) { initializers = append(initializers, y...) } @@ -188,3 +189,12 @@ func ld(s, t string, ignoreCase bool) int { } return d[len(s)][len(t)] } + +func stringInSlice(a string, list []string) bool { + for _, b := range list { + if b == a { + return true + } + } + return false +} diff --git a/vendor/github.com/spf13/cobra/command.go b/vendor/github.com/spf13/cobra/command.go index 58e6ceb0778..34d1bf36712 100644 --- a/vendor/github.com/spf13/cobra/command.go +++ b/vendor/github.com/spf13/cobra/command.go @@ -27,6 +27,9 @@ import ( flag "github.com/spf13/pflag" ) +// FParseErrWhitelist configures Flag parse errors to be ignored +type FParseErrWhitelist flag.ParseErrorsWhitelist + // Command is just that, a command for your application. // E.g. 'go run ...' - 'run' is the command. Cobra requires // you to define the usage and description as part of your command @@ -75,6 +78,11 @@ type Command struct { // group commands. Annotations map[string]string + // Version defines the version for this command. If this value is non-empty and the command does not + // define a "version" flag, a "version" boolean flag will be added to the command and, if specified, + // will print content of the "Version" variable. 
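The bash_completions changes above namespace every generated helper with the command name (e.g. `__app_handle_flag` instead of `__handle_flag`) and document alias completion; generating the script itself is unchanged. A small sketch, reusing the hypothetical `rootCmd` from the earlier snippet and an arbitrary output path:

```go
package cmd

import "log"

// GenCompletion writes a bash completion script for rootCmd.
// The helpers inside the generated script are now prefixed with the
// root command's name, so multiple completions can coexist in one shell.
func GenCompletion(path string) {
	if err := rootCmd.GenBashCompletionFile(path); err != nil {
		log.Fatalf("generating bash completion: %v", err)
	}
}
```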
+ Version string + // The *Run functions are executed in the following order: // * PersistentPreRun() // * PreRun() @@ -118,6 +126,10 @@ type Command struct { // will be printed by generating docs for this command. DisableAutoGenTag bool + // DisableFlagsInUseLine will disable the addition of [flags] to the usage + // line of a command when printing help or generating docs + DisableFlagsInUseLine bool + // DisableSuggestions disables the suggestions based on Levenshtein distance // that go along with 'unknown command' messages. DisableSuggestions bool @@ -128,6 +140,9 @@ type Command struct { // TraverseChildren parses flags on all parents before executing child command. TraverseChildren bool + //FParseErrWhitelist flag parse errors to be ignored + FParseErrWhitelist FParseErrWhitelist + // commands is the list of commands supported by this program. commands []*Command // parent is a parent command for this command. @@ -138,6 +153,11 @@ type Command struct { commandsMaxNameLen int // commandsAreSorted defines, if command slice are sorted or not. commandsAreSorted bool + // commandCalledAs is the name or alias value used to call this command. + commandCalledAs struct { + name string + called bool + } // args is actual args parsed from flags. args []string @@ -173,6 +193,8 @@ type Command struct { // helpCommand is command with usage 'help'. If it's not defined by user, // cobra uses default help command. helpCommand *Command + // versionTemplate is the version template defined by user. + versionTemplate string } // SetArgs sets arguments for the command. It is set to os.Args[1:] by default, if desired, can be overridden @@ -218,6 +240,11 @@ func (c *Command) SetHelpTemplate(s string) { c.helpTemplate = s } +// SetVersionTemplate sets version template to be used. Application can use it to set custom template. +func (c *Command) SetVersionTemplate(s string) { + c.versionTemplate = s +} + // SetGlobalNormalizationFunc sets a normalization function to all flag sets and also to child commands. // The user should not have a cyclic dependency on commands. func (c *Command) SetGlobalNormalizationFunc(n func(f *flag.FlagSet, name string) flag.NormalizedName) { @@ -407,6 +434,19 @@ func (c *Command) HelpTemplate() string { {{end}}{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` } +// VersionTemplate return version template for the command. +func (c *Command) VersionTemplate() string { + if c.versionTemplate != "" { + return c.versionTemplate + } + + if c.HasParent() { + return c.parent.VersionTemplate() + } + return `{{with .Name}}{{printf "%s " .}}{{end}}{{printf "version %s" .Version}} +` +} + func hasNoOptDefVal(name string, fs *flag.FlagSet) bool { flag := fs.Lookup(name) if flag == nil { @@ -441,6 +481,9 @@ Loop: s := args[0] args = args[1:] switch { + case s == "--": + // "--" terminates the flags + break Loop case strings.HasPrefix(s, "--") && !strings.Contains(s, "=") && !hasNoOptDefVal(s[2:], flags): // If '--flag arg' then // delete arg from args. 
@@ -528,6 +571,7 @@ func (c *Command) findNext(next string) *Command { matches := make([]*Command, 0) for _, cmd := range c.commands { if cmd.Name() == next || cmd.HasAlias(next) { + cmd.commandCalledAs.name = next return cmd } if EnablePrefixMatching && cmd.hasNameOrAliasPrefix(next) { @@ -538,6 +582,7 @@ func (c *Command) findNext(next string) *Command { if len(matches) == 1 { return matches[0] } + return nil } @@ -621,10 +666,8 @@ func (c *Command) Root() *Command { return c } -// ArgsLenAtDash will return the length of f.Args at the moment when a -- was -// found during arg parsing. This allows your program to know which args were -// before the -- and which came after. (Description from -// https://godoc.org/github.com/spf13/pflag#FlagSet.ArgsLenAtDash). +// ArgsLenAtDash will return the length of c.Flags().Args at the moment +// when a -- was found during args parsing. func (c *Command) ArgsLenAtDash() int { return c.Flags().ArgsLenAtDash() } @@ -638,9 +681,10 @@ func (c *Command) execute(a []string) (err error) { c.Printf("Command %q is deprecated, %s\n", c.Name(), c.Deprecated) } - // initialize help flag as the last point possible to allow for user + // initialize help and version flag at the last point possible to allow for user // overriding c.InitDefaultHelpFlag() + c.InitDefaultVersionFlag() err = c.ParseFlags(a) if err != nil { @@ -657,7 +701,27 @@ func (c *Command) execute(a []string) (err error) { return err } - if helpVal || !c.Runnable() { + if helpVal { + return flag.ErrHelp + } + + // for back-compat, only add version flag behavior if version is defined + if c.Version != "" { + versionVal, err := c.Flags().GetBool("version") + if err != nil { + c.Println("\"version\" flag declared as non-bool. Please correct your code") + return err + } + if versionVal { + err := tmpl(c.OutOrStdout(), c.VersionTemplate(), c) + if err != nil { + c.Println(err) + } + return err + } + } + + if !c.Runnable() { return flag.ErrHelp } @@ -780,6 +844,11 @@ func (c *Command) ExecuteC() (cmd *Command, err error) { return c, err } + cmd.commandCalledAs.called = true + if cmd.commandCalledAs.name == "" { + cmd.commandCalledAs.name = cmd.Name() + } + err = cmd.execute(flags) if err != nil { // Always show help if requested, even if SilenceErrors is in @@ -825,7 +894,7 @@ func (c *Command) validateRequiredFlags() error { }) if len(missingFlagNames) > 0 { - return fmt.Errorf(`Required flag(s) "%s" have/has not been set`, strings.Join(missingFlagNames, `", "`)) + return fmt.Errorf(`required flag(s) "%s" not set`, strings.Join(missingFlagNames, `", "`)) } return nil } @@ -846,6 +915,27 @@ func (c *Command) InitDefaultHelpFlag() { } } +// InitDefaultVersionFlag adds default version flag to c. +// It is called automatically by executing the c. +// If c already has a version flag, it will do nothing. +// If c.Version is empty, it will do nothing. +func (c *Command) InitDefaultVersionFlag() { + if c.Version == "" { + return + } + + c.mergePersistentFlags() + if c.Flags().Lookup("version") == nil { + usage := "version for " + if c.Name() == "" { + usage += "this command" + } else { + usage += c.Name() + } + c.Flags().Bool("version", false, usage) + } +} + // InitDefaultHelpCmd adds default help command to c. // It is called automatically by executing the c or by calling help and usage. // If c already has help command or c has no subcommands, it will do nothing. 
@@ -877,7 +967,7 @@ Simply type ` + c.Name() + ` help [path to command] for full details.`, c.AddCommand(c.helpCommand) } -// ResetCommands used for testing. +// ResetCommands delete parent, subcommand and help command from c. func (c *Command) ResetCommands() { c.parent = nil c.commands = nil @@ -996,6 +1086,9 @@ func (c *Command) UseLine() string { } else { useline = c.Use } + if c.DisableFlagsInUseLine { + return useline + } if c.HasAvailableFlags() && !strings.Contains(useline, "[flags]") { useline += " [flags]" } @@ -1063,14 +1156,25 @@ func (c *Command) HasAlias(s string) bool { return false } +// CalledAs returns the command name or alias that was used to invoke +// this command or an empty string if the command has not been called. +func (c *Command) CalledAs() string { + if c.commandCalledAs.called { + return c.commandCalledAs.name + } + return "" +} + // hasNameOrAliasPrefix returns true if the Name or any of aliases start // with prefix func (c *Command) hasNameOrAliasPrefix(prefix string) bool { if strings.HasPrefix(c.Name(), prefix) { + c.commandCalledAs.name = c.Name() return true } for _, alias := range c.Aliases { if strings.HasPrefix(alias, prefix) { + c.commandCalledAs.name = alias return true } } @@ -1163,7 +1267,7 @@ func (c *Command) HasAvailableSubCommands() bool { } } - // the command either has no sub comamnds, or no available (non deprecated/help/hidden) + // the command either has no sub commands, or no available (non deprecated/help/hidden) // sub commands return false } @@ -1173,7 +1277,7 @@ func (c *Command) HasParent() bool { return c.parent != nil } -// GlobalNormalizationFunc returns the global normalization function or nil if doesn't exists. +// GlobalNormalizationFunc returns the global normalization function or nil if it doesn't exist. func (c *Command) GlobalNormalizationFunc() func(f *flag.FlagSet, name string) flag.NormalizedName { return c.globNormFunc } @@ -1273,7 +1377,7 @@ func (c *Command) PersistentFlags() *flag.FlagSet { return c.pflags } -// ResetFlags is used in testing. +// ResetFlags deletes all flags from command. func (c *Command) ResetFlags() { c.flagErrorBuf = new(bytes.Buffer) c.flagErrorBuf.Reset() @@ -1365,6 +1469,10 @@ func (c *Command) ParseFlags(args []string) error { } beforeErrorBufLen := c.flagErrorBuf.Len() c.mergePersistentFlags() + + //do it here after merging all flags and just before parse + c.Flags().ParseErrorsWhitelist = flag.ParseErrorsWhitelist(c.FParseErrWhitelist) + err := c.Flags().Parse(args) // Print warnings if they occurred (e.g. deprecated flag messages). if c.flagErrorBuf.Len()-beforeErrorBufLen > 0 && err == nil { diff --git a/vendor/github.com/spf13/pflag/bytes.go b/vendor/github.com/spf13/pflag/bytes.go new file mode 100644 index 00000000000..12c58db9fe3 --- /dev/null +++ b/vendor/github.com/spf13/pflag/bytes.go @@ -0,0 +1,105 @@ +package pflag + +import ( + "encoding/hex" + "fmt" + "strings" +) + +// BytesHex adapts []byte for use as a flag. 
Value of flag is HEX encoded +type bytesHexValue []byte + +func (bytesHex bytesHexValue) String() string { + return fmt.Sprintf("%X", []byte(bytesHex)) +} + +func (bytesHex *bytesHexValue) Set(value string) error { + bin, err := hex.DecodeString(strings.TrimSpace(value)) + + if err != nil { + return err + } + + *bytesHex = bin + + return nil +} + +func (*bytesHexValue) Type() string { + return "bytesHex" +} + +func newBytesHexValue(val []byte, p *[]byte) *bytesHexValue { + *p = val + return (*bytesHexValue)(p) +} + +func bytesHexConv(sval string) (interface{}, error) { + + bin, err := hex.DecodeString(sval) + + if err == nil { + return bin, nil + } + + return nil, fmt.Errorf("invalid string being converted to Bytes: %s %s", sval, err) +} + +// GetBytesHex return the []byte value of a flag with the given name +func (f *FlagSet) GetBytesHex(name string) ([]byte, error) { + val, err := f.getFlagType(name, "bytesHex", bytesHexConv) + + if err != nil { + return []byte{}, err + } + + return val.([]byte), nil +} + +// BytesHexVar defines an []byte flag with specified name, default value, and usage string. +// The argument p points to an []byte variable in which to store the value of the flag. +func (f *FlagSet) BytesHexVar(p *[]byte, name string, value []byte, usage string) { + f.VarP(newBytesHexValue(value, p), name, "", usage) +} + +// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { + f.VarP(newBytesHexValue(value, p), name, shorthand, usage) +} + +// BytesHexVar defines an []byte flag with specified name, default value, and usage string. +// The argument p points to an []byte variable in which to store the value of the flag. +func BytesHexVar(p *[]byte, name string, value []byte, usage string) { + CommandLine.VarP(newBytesHexValue(value, p), name, "", usage) +} + +// BytesHexVarP is like BytesHexVar, but accepts a shorthand letter that can be used after a single dash. +func BytesHexVarP(p *[]byte, name, shorthand string, value []byte, usage string) { + CommandLine.VarP(newBytesHexValue(value, p), name, shorthand, usage) +} + +// BytesHex defines an []byte flag with specified name, default value, and usage string. +// The return value is the address of an []byte variable that stores the value of the flag. +func (f *FlagSet) BytesHex(name string, value []byte, usage string) *[]byte { + p := new([]byte) + f.BytesHexVarP(p, name, "", value, usage) + return p +} + +// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { + p := new([]byte) + f.BytesHexVarP(p, name, shorthand, value, usage) + return p +} + +// BytesHex defines an []byte flag with specified name, default value, and usage string. +// The return value is the address of an []byte variable that stores the value of the flag. +func BytesHex(name string, value []byte, usage string) *[]byte { + return CommandLine.BytesHexP(name, "", value, usage) +} + +// BytesHexP is like BytesHex, but accepts a shorthand letter that can be used after a single dash. 
+func BytesHexP(name, shorthand string, value []byte, usage string) *[]byte { + return CommandLine.BytesHexP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/count.go b/vendor/github.com/spf13/pflag/count.go index 250a43814c9..aa126e44d1c 100644 --- a/vendor/github.com/spf13/pflag/count.go +++ b/vendor/github.com/spf13/pflag/count.go @@ -11,13 +11,13 @@ func newCountValue(val int, p *int) *countValue { } func (i *countValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - // -1 means that no specific value was passed, so increment - if v == -1 { + // "+1" means that no specific value was passed, so increment + if s == "+1" { *i = countValue(*i + 1) - } else { - *i = countValue(v) + return nil } + v, err := strconv.ParseInt(s, 0, 0) + *i = countValue(v) return err } @@ -54,7 +54,7 @@ func (f *FlagSet) CountVar(p *int, name string, usage string) { // CountVarP is like CountVar only take a shorthand for the flag name. func (f *FlagSet) CountVarP(p *int, name, shorthand string, usage string) { flag := f.VarPF(newCountValue(0, p), name, shorthand, usage) - flag.NoOptDefVal = "-1" + flag.NoOptDefVal = "+1" } // CountVar like CountVar only the flag is placed on the CommandLine instead of a given flag set diff --git a/vendor/github.com/spf13/pflag/duration_slice.go b/vendor/github.com/spf13/pflag/duration_slice.go new file mode 100644 index 00000000000..52c6b6dc104 --- /dev/null +++ b/vendor/github.com/spf13/pflag/duration_slice.go @@ -0,0 +1,128 @@ +package pflag + +import ( + "fmt" + "strings" + "time" +) + +// -- durationSlice Value +type durationSliceValue struct { + value *[]time.Duration + changed bool +} + +func newDurationSliceValue(val []time.Duration, p *[]time.Duration) *durationSliceValue { + dsv := new(durationSliceValue) + dsv.value = p + *dsv.value = val + return dsv +} + +func (s *durationSliceValue) Set(val string) error { + ss := strings.Split(val, ",") + out := make([]time.Duration, len(ss)) + for i, d := range ss { + var err error + out[i], err = time.ParseDuration(d) + if err != nil { + return err + } + + } + if !s.changed { + *s.value = out + } else { + *s.value = append(*s.value, out...) + } + s.changed = true + return nil +} + +func (s *durationSliceValue) Type() string { + return "durationSlice" +} + +func (s *durationSliceValue) String() string { + out := make([]string, len(*s.value)) + for i, d := range *s.value { + out[i] = fmt.Sprintf("%s", d) + } + return "[" + strings.Join(out, ",") + "]" +} + +func durationSliceConv(val string) (interface{}, error) { + val = strings.Trim(val, "[]") + // Empty string would cause a slice with one (empty) entry + if len(val) == 0 { + return []time.Duration{}, nil + } + ss := strings.Split(val, ",") + out := make([]time.Duration, len(ss)) + for i, d := range ss { + var err error + out[i], err = time.ParseDuration(d) + if err != nil { + return nil, err + } + + } + return out, nil +} + +// GetDurationSlice returns the []time.Duration value of a flag with the given name +func (f *FlagSet) GetDurationSlice(name string) ([]time.Duration, error) { + val, err := f.getFlagType(name, "durationSlice", durationSliceConv) + if err != nil { + return []time.Duration{}, err + } + return val.([]time.Duration), nil +} + +// DurationSliceVar defines a durationSlice flag with specified name, default value, and usage string. +// The argument p points to a []time.Duration variable in which to store the value of the flag. 
+func (f *FlagSet) DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { + f.VarP(newDurationSliceValue(value, p), name, "", usage) +} + +// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { + f.VarP(newDurationSliceValue(value, p), name, shorthand, usage) +} + +// DurationSliceVar defines a duration[] flag with specified name, default value, and usage string. +// The argument p points to a duration[] variable in which to store the value of the flag. +func DurationSliceVar(p *[]time.Duration, name string, value []time.Duration, usage string) { + CommandLine.VarP(newDurationSliceValue(value, p), name, "", usage) +} + +// DurationSliceVarP is like DurationSliceVar, but accepts a shorthand letter that can be used after a single dash. +func DurationSliceVarP(p *[]time.Duration, name, shorthand string, value []time.Duration, usage string) { + CommandLine.VarP(newDurationSliceValue(value, p), name, shorthand, usage) +} + +// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a []time.Duration variable that stores the value of the flag. +func (f *FlagSet) DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { + p := []time.Duration{} + f.DurationSliceVarP(&p, name, "", value, usage) + return &p +} + +// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { + p := []time.Duration{} + f.DurationSliceVarP(&p, name, shorthand, value, usage) + return &p +} + +// DurationSlice defines a []time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a []time.Duration variable that stores the value of the flag. +func DurationSlice(name string, value []time.Duration, usage string) *[]time.Duration { + return CommandLine.DurationSliceP(name, "", value, usage) +} + +// DurationSliceP is like DurationSlice, but accepts a shorthand letter that can be used after a single dash. +func DurationSliceP(name, shorthand string, value []time.Duration, usage string) *[]time.Duration { + return CommandLine.DurationSliceP(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/flag.go b/vendor/github.com/spf13/pflag/flag.go index 6f1fc3007a8..5eadc84e3c4 100644 --- a/vendor/github.com/spf13/pflag/flag.go +++ b/vendor/github.com/spf13/pflag/flag.go @@ -101,6 +101,7 @@ package pflag import ( "bytes" "errors" + goflag "flag" "fmt" "io" "os" @@ -123,6 +124,12 @@ const ( PanicOnError ) +// ParseErrorsWhitelist defines the parsing errors that can be ignored +type ParseErrorsWhitelist struct { + // UnknownFlags will ignore unknown flags errors and continue parsing rest of the flags + UnknownFlags bool +} + // NormalizedName is a flag name that has been normalized according to rules // for the FlagSet (e.g. making '-' and '_' equivalent). type NormalizedName string @@ -138,6 +145,9 @@ type FlagSet struct { // help/usage messages. 
SortFlags bool + // ParseErrorsWhitelist is used to configure a whitelist of errors + ParseErrorsWhitelist ParseErrorsWhitelist + name string parsed bool actual map[NormalizedName]*Flag @@ -153,6 +163,8 @@ type FlagSet struct { output io.Writer // nil means stderr; use out() accessor interspersed bool // allow interspersed option/non-option args normalizeNameFunc func(f *FlagSet, name string) NormalizedName + + addedGoFlagSets []*goflag.FlagSet } // A Flag represents the state of a flag. @@ -202,12 +214,18 @@ func sortFlags(flags map[NormalizedName]*Flag) []*Flag { func (f *FlagSet) SetNormalizeFunc(n func(f *FlagSet, name string) NormalizedName) { f.normalizeNameFunc = n f.sortedFormal = f.sortedFormal[:0] - for k, v := range f.orderedFormal { - delete(f.formal, NormalizedName(v.Name)) - nname := f.normalizeFlagName(v.Name) - v.Name = string(nname) - f.formal[nname] = v - f.orderedFormal[k] = v + for fname, flag := range f.formal { + nname := f.normalizeFlagName(flag.Name) + if fname == nname { + continue + } + flag.Name = string(nname) + delete(f.formal, fname) + f.formal[nname] = flag + if _, set := f.actual[fname]; set { + delete(f.actual, fname) + f.actual[nname] = flag + } } } @@ -261,16 +279,16 @@ func (f *FlagSet) VisitAll(fn func(*Flag)) { } } -// HasFlags returns a bool to indicate if the FlagSet has any flags definied. +// HasFlags returns a bool to indicate if the FlagSet has any flags defined. func (f *FlagSet) HasFlags() bool { return len(f.formal) > 0 } // HasAvailableFlags returns a bool to indicate if the FlagSet has any flags -// definied that are not hidden or deprecated. +// that are not hidden. func (f *FlagSet) HasAvailableFlags() bool { for _, flag := range f.formal { - if !flag.Hidden && len(flag.Deprecated) == 0 { + if !flag.Hidden { return true } } @@ -380,6 +398,7 @@ func (f *FlagSet) MarkDeprecated(name string, usageMessage string) error { return fmt.Errorf("deprecated message for flag %q must be set", name) } flag.Deprecated = usageMessage + flag.Hidden = true return nil } @@ -440,13 +459,15 @@ func (f *FlagSet) Set(name, value string) error { return fmt.Errorf("invalid argument %q for %q flag: %v", value, flagName, err) } - if f.actual == nil { - f.actual = make(map[NormalizedName]*Flag) - } - f.actual[normalName] = flag - f.orderedActual = append(f.orderedActual, flag) + if !flag.Changed { + if f.actual == nil { + f.actual = make(map[NormalizedName]*Flag) + } + f.actual[normalName] = flag + f.orderedActual = append(f.orderedActual, flag) - flag.Changed = true + flag.Changed = true + } if flag.Deprecated != "" { fmt.Fprintf(f.out(), "Flag --%s has been deprecated, %s\n", flag.Name, flag.Deprecated) @@ -556,6 +577,14 @@ func UnquoteUsage(flag *Flag) (name string, usage string) { name = "int" case "uint64": name = "uint" + case "stringSlice": + name = "strings" + case "intSlice": + name = "ints" + case "uintSlice": + name = "uints" + case "boolSlice": + name = "bools" } return @@ -570,11 +599,14 @@ func wrapN(i, slop int, s string) (string, string) { return s, "" } - w := strings.LastIndexAny(s[:i], " \t") + w := strings.LastIndexAny(s[:i], " \t\n") if w <= 0 { return s, "" } - + nlPos := strings.LastIndex(s[:i], "\n") + if nlPos > 0 && nlPos < w { + return s[:nlPos], s[nlPos+1:] + } return s[:w], s[w+1:] } @@ -583,7 +615,7 @@ func wrapN(i, slop int, s string) (string, string) { // caller). 
Pass `w` == 0 to do no wrapping func wrap(i, w int, s string) string { if w == 0 { - return s + return strings.Replace(s, "\n", "\n"+strings.Repeat(" ", i), -1) } // space between indent i and end of line width w into which @@ -601,7 +633,7 @@ func wrap(i, w int, s string) string { } // If still not enough space then don't even try to wrap. if wrap < 24 { - return s + return strings.Replace(s, "\n", r, -1) } // Try to avoid short orphan words on the final line, by @@ -613,14 +645,14 @@ func wrap(i, w int, s string) string { // Handle first line, which is indented by the caller (or the // special case above) l, s = wrapN(wrap, slop, s) - r = r + l + r = r + strings.Replace(l, "\n", "\n"+strings.Repeat(" ", i), -1) // Now wrap the rest for s != "" { var t string t, s = wrapN(wrap, slop, s) - r = r + "\n" + strings.Repeat(" ", i) + t + r = r + "\n" + strings.Repeat(" ", i) + strings.Replace(t, "\n", "\n"+strings.Repeat(" ", i), -1) } return r @@ -637,7 +669,7 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { maxlen := 0 f.VisitAll(func(flag *Flag) { - if flag.Deprecated != "" || flag.Hidden { + if flag.Hidden { return } @@ -660,6 +692,10 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { if flag.NoOptDefVal != "true" { line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } + case "count": + if flag.NoOptDefVal != "+1" { + line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) + } default: line += fmt.Sprintf("[=%s]", flag.NoOptDefVal) } @@ -680,6 +716,9 @@ func (f *FlagSet) FlagUsagesWrapped(cols int) string { line += fmt.Sprintf(" (default %s)", flag.DefValue) } } + if len(flag.Deprecated) != 0 { + line += fmt.Sprintf(" (DEPRECATED: %s)", flag.Deprecated) + } lines = append(lines, line) }) @@ -857,8 +896,10 @@ func VarP(value Value, name, shorthand, usage string) { // returns the error. func (f *FlagSet) failf(format string, a ...interface{}) error { err := fmt.Errorf(format, a...) - fmt.Fprintln(f.out(), err) - f.usage() + if f.errorHandling != ContinueOnError { + fmt.Fprintln(f.out(), err) + f.usage() + } return err } @@ -874,6 +915,25 @@ func (f *FlagSet) usage() { } } +//--unknown (args will be empty) +//--unknown --next-flag ... (args will be --next-flag ...) +//--unknown arg ... (args will be arg ...) +func stripUnknownFlagValue(args []string) []string { + if len(args) == 0 { + //--unknown + return args + } + + first := args[0] + if first[0] == '-' { + //--unknown --next-flag ... + return args + } + + //--unknown arg ... (args will be arg ...) + return args[1:] +} + func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []string, err error) { a = args name := s[2:] @@ -885,13 +945,24 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin split := strings.SplitN(name, "=", 2) name = split[0] flag, exists := f.formal[f.normalizeFlagName(name)] + if !exists { - if name == "help" { // special case for nice help message. + switch { + case name == "help": f.usage() return a, ErrHelp + case f.ParseErrorsWhitelist.UnknownFlags: + // --unknown=unknownval arg ... 
+ // we do not want to lose arg in this case + if len(split) >= 2 { + return a, nil + } + + return stripUnknownFlagValue(a), nil + default: + err = f.failf("unknown flag: --%s", name) + return } - err = f.failf("unknown flag: --%s", name) - return } var value string @@ -912,6 +983,9 @@ func (f *FlagSet) parseLongArg(s string, args []string, fn parseFunc) (a []strin } err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } return } @@ -926,13 +1000,25 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse flag, exists := f.shorthands[c] if !exists { - if c == 'h' { // special case for nice help message. + switch { + case c == 'h': f.usage() err = ErrHelp return + case f.ParseErrorsWhitelist.UnknownFlags: + // '-f=arg arg ...' + // we do not want to lose arg in this case + if len(shorthands) > 2 && shorthands[1] == '=' { + outShorts = "" + return + } + + outArgs = stripUnknownFlagValue(outArgs) + return + default: + err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) + return } - err = f.failf("unknown shorthand flag: %q in -%s", c, shorthands) - return } var value string @@ -962,6 +1048,9 @@ func (f *FlagSet) parseSingleShortArg(shorthands string, args []string, fn parse } err = fn(flag, value) + if err != nil { + f.failf(err.Error()) + } return } @@ -1016,6 +1105,11 @@ func (f *FlagSet) parseArgs(args []string, fn parseFunc) (err error) { // are defined and before flags are accessed by the program. // The return value will be ErrHelp if -help was set but not defined. func (f *FlagSet) Parse(arguments []string) error { + if f.addedGoFlagSets != nil { + for _, goFlagSet := range f.addedGoFlagSets { + goFlagSet.Parse(nil) + } + } f.parsed = true if len(arguments) < 0 { @@ -1034,6 +1128,7 @@ func (f *FlagSet) Parse(arguments []string) error { case ContinueOnError: return err case ExitOnError: + fmt.Println(err) os.Exit(2) case PanicOnError: panic(err) diff --git a/vendor/github.com/spf13/pflag/golangflag.go b/vendor/github.com/spf13/pflag/golangflag.go index c4f47ebe590..d3dd72b7fee 100644 --- a/vendor/github.com/spf13/pflag/golangflag.go +++ b/vendor/github.com/spf13/pflag/golangflag.go @@ -98,4 +98,8 @@ func (f *FlagSet) AddGoFlagSet(newSet *goflag.FlagSet) { newSet.VisitAll(func(goflag *goflag.Flag) { f.AddGoFlag(goflag) }) + if f.addedGoFlagSets == nil { + f.addedGoFlagSets = make([]*goflag.FlagSet, 0) + } + f.addedGoFlagSets = append(f.addedGoFlagSets, newSet) } diff --git a/vendor/github.com/spf13/pflag/int16.go b/vendor/github.com/spf13/pflag/int16.go new file mode 100644 index 00000000000..f1a01d05e69 --- /dev/null +++ b/vendor/github.com/spf13/pflag/int16.go @@ -0,0 +1,88 @@ +package pflag + +import "strconv" + +// -- int16 Value +type int16Value int16 + +func newInt16Value(val int16, p *int16) *int16Value { + *p = val + return (*int16Value)(p) +} + +func (i *int16Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 16) + *i = int16Value(v) + return err +} + +func (i *int16Value) Type() string { + return "int16" +} + +func (i *int16Value) String() string { return strconv.FormatInt(int64(*i), 10) } + +func int16Conv(sval string) (interface{}, error) { + v, err := strconv.ParseInt(sval, 0, 16) + if err != nil { + return 0, err + } + return int16(v), nil +} + +// GetInt16 returns the int16 value of a flag with the given name +func (f *FlagSet) GetInt16(name string) (int16, error) { + val, err := f.getFlagType(name, "int16", int16Conv) + if err != nil { + return 0, err + } + return val.(int16), nil +} + +// Int16Var defines 
an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func (f *FlagSet) Int16Var(p *int16, name string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + f.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16Var defines an int16 flag with specified name, default value, and usage string. +// The argument p points to an int16 variable in which to store the value of the flag. +func Int16Var(p *int16, name string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, "", usage) +} + +// Int16VarP is like Int16Var, but accepts a shorthand letter that can be used after a single dash. +func Int16VarP(p *int16, name, shorthand string, value int16, usage string) { + CommandLine.VarP(newInt16Value(value, p), name, shorthand, usage) +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. +// The return value is the address of an int16 variable that stores the value of the flag. +func (f *FlagSet) Int16(name string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, "", value, usage) + return p +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. +func (f *FlagSet) Int16P(name, shorthand string, value int16, usage string) *int16 { + p := new(int16) + f.Int16VarP(p, name, shorthand, value, usage) + return p +} + +// Int16 defines an int16 flag with specified name, default value, and usage string. +// The return value is the address of an int16 variable that stores the value of the flag. +func Int16(name string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, "", value, usage) +} + +// Int16P is like Int16, but accepts a shorthand letter that can be used after a single dash. +func Int16P(name, shorthand string, value int16, usage string) *int16 { + return CommandLine.Int16P(name, shorthand, value, usage) +} diff --git a/vendor/github.com/spf13/pflag/string_array.go b/vendor/github.com/spf13/pflag/string_array.go index 276b7ed49ed..fa7bc60187a 100644 --- a/vendor/github.com/spf13/pflag/string_array.go +++ b/vendor/github.com/spf13/pflag/string_array.go @@ -52,7 +52,7 @@ func (f *FlagSet) GetStringArray(name string) ([]string, error) { // StringArrayVar defines a string flag with specified name, default value, and usage string. // The argument p points to a []string variable in which to store the values of the multiple flags. -// The value of each argument will not try to be separated by comma +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. func (f *FlagSet) StringArrayVar(p *[]string, name string, value []string, usage string) { f.VarP(newStringArrayValue(value, p), name, "", usage) } @@ -64,7 +64,7 @@ func (f *FlagSet) StringArrayVarP(p *[]string, name, shorthand string, value []s // StringArrayVar defines a string flag with specified name, default value, and usage string. // The argument p points to a []string variable in which to store the value of the flag. -// The value of each argument will not try to be separated by comma +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. 
func StringArrayVar(p *[]string, name string, value []string, usage string) { CommandLine.VarP(newStringArrayValue(value, p), name, "", usage) } @@ -76,7 +76,7 @@ func StringArrayVarP(p *[]string, name, shorthand string, value []string, usage // StringArray defines a string flag with specified name, default value, and usage string. // The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. func (f *FlagSet) StringArray(name string, value []string, usage string) *[]string { p := []string{} f.StringArrayVarP(&p, name, "", value, usage) @@ -92,7 +92,7 @@ func (f *FlagSet) StringArrayP(name, shorthand string, value []string, usage str // StringArray defines a string flag with specified name, default value, and usage string. // The return value is the address of a []string variable that stores the value of the flag. -// The value of each argument will not try to be separated by comma +// The value of each argument will not try to be separated by comma. Use a StringSlice for that. func StringArray(name string, value []string, usage string) *[]string { return CommandLine.StringArrayP(name, "", value, usage) } diff --git a/vendor/github.com/spf13/pflag/string_slice.go b/vendor/github.com/spf13/pflag/string_slice.go index 05eee75438d..0cd3ccc083e 100644 --- a/vendor/github.com/spf13/pflag/string_slice.go +++ b/vendor/github.com/spf13/pflag/string_slice.go @@ -82,6 +82,11 @@ func (f *FlagSet) GetStringSlice(name string) ([]string, error) { // StringSliceVar defines a string flag with specified name, default value, and usage string. // The argument p points to a []string variable in which to store the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" -ss="v3" +// will result in +// []string{"v1", "v2", "v3"} func (f *FlagSet) StringSliceVar(p *[]string, name string, value []string, usage string) { f.VarP(newStringSliceValue(value, p), name, "", usage) } @@ -93,6 +98,11 @@ func (f *FlagSet) StringSliceVarP(p *[]string, name, shorthand string, value []s // StringSliceVar defines a string flag with specified name, default value, and usage string. // The argument p points to a []string variable in which to store the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" -ss="v3" +// will result in +// []string{"v1", "v2", "v3"} func StringSliceVar(p *[]string, name string, value []string, usage string) { CommandLine.VarP(newStringSliceValue(value, p), name, "", usage) } @@ -104,6 +114,11 @@ func StringSliceVarP(p *[]string, name, shorthand string, value []string, usage // StringSlice defines a string flag with specified name, default value, and usage string. // The return value is the address of a []string variable that stores the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. 
+// For example: +// --ss="v1,v2" -ss="v3" +// will result in +// []string{"v1", "v2", "v3"} func (f *FlagSet) StringSlice(name string, value []string, usage string) *[]string { p := []string{} f.StringSliceVarP(&p, name, "", value, usage) @@ -119,6 +134,11 @@ func (f *FlagSet) StringSliceP(name, shorthand string, value []string, usage str // StringSlice defines a string flag with specified name, default value, and usage string. // The return value is the address of a []string variable that stores the value of the flag. +// Compared to StringArray flags, StringSlice flags take comma-separated value as arguments and split them accordingly. +// For example: +// --ss="v1,v2" -ss="v3" +// will result in +// []string{"v1", "v2", "v3"} func StringSlice(name string, value []string, usage string) *[]string { return CommandLine.StringSliceP(name, "", value, usage) } diff --git a/vendor/golang.org/x/crypto/openpgp/keys.go b/vendor/golang.org/x/crypto/openpgp/keys.go index fd582a89c0c..aceb1b79636 100644 --- a/vendor/golang.org/x/crypto/openpgp/keys.go +++ b/vendor/golang.org/x/crypto/openpgp/keys.go @@ -500,6 +500,10 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err IssuerKeyId: &e.PrimaryKey.KeyId, }, } + err = e.Identities[uid.Id].SelfSignature.SignUserId(uid.Id, e.PrimaryKey, e.PrivateKey, config) + if err != nil { + return nil, err + } // If the user passes in a DefaultHash via packet.Config, // set the PreferredHash for the SelfSignature. @@ -529,14 +533,17 @@ func NewEntity(name, comment, email string, config *packet.Config) (*Entity, err } e.Subkeys[0].PublicKey.IsSubkey = true e.Subkeys[0].PrivateKey.IsSubkey = true - + err = e.Subkeys[0].Sig.SignKey(e.Subkeys[0].PublicKey, e.PrivateKey, config) + if err != nil { + return nil, err + } return e, nil } // SerializePrivate serializes an Entity, including private key material, to // the given Writer. For now, it must only be used on an Entity returned from // NewEntity. -// If config is nil, sensible defaults will be used. 
+// config is ignored func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error) { err = e.PrivateKey.Serialize(w) if err != nil { @@ -547,10 +554,6 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error if err != nil { return } - err = ident.SelfSignature.SignUserId(ident.UserId.Id, e.PrimaryKey, e.PrivateKey, config) - if err != nil { - return - } err = ident.SelfSignature.Serialize(w) if err != nil { return @@ -561,10 +564,6 @@ func (e *Entity) SerializePrivate(w io.Writer, config *packet.Config) (err error if err != nil { return } - err = subkey.Sig.SignKey(subkey.PublicKey, e.PrivateKey, config) - if err != nil { - return - } err = subkey.Sig.Serialize(w) if err != nil { return diff --git a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go index 266840d05a3..02b372cf374 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/encrypted_key.go @@ -42,12 +42,18 @@ func (e *EncryptedKey) parse(r io.Reader) (err error) { switch e.Algo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) + if err != nil { + return + } case PubKeyAlgoElGamal: e.encryptedMPI1.bytes, e.encryptedMPI1.bitLength, err = readMPI(r) if err != nil { return } e.encryptedMPI2.bytes, e.encryptedMPI2.bitLength, err = readMPI(r) + if err != nil { + return + } } _, err = consumeAll(r) return @@ -72,7 +78,8 @@ func (e *EncryptedKey) Decrypt(priv *PrivateKey, config *Config) error { // padding oracle attacks. switch priv.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSAEncryptOnly: - b, err = rsa.DecryptPKCS1v15(config.Random(), priv.PrivateKey.(*rsa.PrivateKey), e.encryptedMPI1.bytes) + k := priv.PrivateKey.(*rsa.PrivateKey) + b, err = rsa.DecryptPKCS1v15(config.Random(), k, padToKeySize(&k.PublicKey, e.encryptedMPI1.bytes)) case PubKeyAlgoElGamal: c1 := new(big.Int).SetBytes(e.encryptedMPI1.bytes) c2 := new(big.Int).SetBytes(e.encryptedMPI2.bytes) diff --git a/vendor/golang.org/x/crypto/openpgp/packet/packet.go b/vendor/golang.org/x/crypto/openpgp/packet/packet.go index 3eded93f042..625bb5ac809 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/packet.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/packet.go @@ -11,10 +11,12 @@ import ( "crypto/aes" "crypto/cipher" "crypto/des" - "golang.org/x/crypto/cast5" - "golang.org/x/crypto/openpgp/errors" + "crypto/rsa" "io" "math/big" + + "golang.org/x/crypto/cast5" + "golang.org/x/crypto/openpgp/errors" ) // readFull is the same as io.ReadFull except that reading zero bytes returns @@ -500,19 +502,17 @@ func readMPI(r io.Reader) (mpi []byte, bitLength uint16, err error) { numBytes := (int(bitLength) + 7) / 8 mpi = make([]byte, numBytes) _, err = readFull(r, mpi) - return -} - -// mpiLength returns the length of the given *big.Int when serialized as an -// MPI. -func mpiLength(n *big.Int) (mpiLengthInBytes int) { - mpiLengthInBytes = 2 /* MPI length */ - mpiLengthInBytes += (n.BitLen() + 7) / 8 + // According to RFC 4880 3.2. we should check that the MPI has no leading + // zeroes (at least when not an encrypted MPI?), but this implementation + // does generate leading zeroes, so we keep accepting them. return } // writeMPI serializes a big integer to w. func writeMPI(w io.Writer, bitLength uint16, mpiBytes []byte) (err error) { + // Note that we can produce leading zeroes, in violation of RFC 4880 3.2. 
+ // Implementations seem to be tolerant of them, and stripping them would + // make it complex to guarantee matching re-serialization. _, err = w.Write([]byte{byte(bitLength >> 8), byte(bitLength)}) if err == nil { _, err = w.Write(mpiBytes) @@ -525,6 +525,18 @@ func writeBig(w io.Writer, i *big.Int) error { return writeMPI(w, uint16(i.BitLen()), i.Bytes()) } +// padToKeySize left-pads a MPI with zeroes to match the length of the +// specified RSA public. +func padToKeySize(pub *rsa.PublicKey, b []byte) []byte { + k := (pub.N.BitLen() + 7) / 8 + if len(b) >= k { + return b + } + bb := make([]byte, k) + copy(bb[len(bb)-len(b):], b) + return bb +} + // CompressionAlgo Represents the different compression algorithms // supported by OpenPGP (except for BZIP2, which is not currently // supported). See Section 9.3 of RFC 4880. diff --git a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go index ead26233dda..fcd5f525196 100644 --- a/vendor/golang.org/x/crypto/openpgp/packet/public_key.go +++ b/vendor/golang.org/x/crypto/openpgp/packet/public_key.go @@ -244,7 +244,12 @@ func NewECDSAPublicKey(creationTime time.Time, pub *ecdsa.PublicKey) *PublicKey } pk.ec.p.bytes = elliptic.Marshal(pub.Curve, pub.X, pub.Y) - pk.ec.p.bitLength = uint16(8 * len(pk.ec.p.bytes)) + + // The bit length is 3 (for the 0x04 specifying an uncompressed key) + // plus two field elements (for x and y), which are rounded up to the + // nearest byte. See https://tools.ietf.org/html/rfc6637#section-6 + fieldBytes := (pub.Curve.Params().BitSize + 7) & ^7 + pk.ec.p.bitLength = uint16(3 + fieldBytes + fieldBytes) pk.setFingerPrintAndKeyId() return pk @@ -515,7 +520,7 @@ func (pk *PublicKey) VerifySignature(signed hash.Hash, sig *Signature) (err erro switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: rsaPublicKey, _ := pk.PublicKey.(*rsa.PublicKey) - err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes) + err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)) if err != nil { return errors.SignatureError("RSA verification failure") } @@ -566,7 +571,7 @@ func (pk *PublicKey) VerifySignatureV3(signed hash.Hash, sig *SignatureV3) (err switch pk.PubKeyAlgo { case PubKeyAlgoRSA, PubKeyAlgoRSASignOnly: rsaPublicKey := pk.PublicKey.(*rsa.PublicKey) - if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, sig.RSASignature.bytes); err != nil { + if err = rsa.VerifyPKCS1v15(rsaPublicKey, sig.Hash, hashBytes, padToKeySize(rsaPublicKey, sig.RSASignature.bytes)); err != nil { return errors.SignatureError("RSA verification failure") } return diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go index 02dad484e5c..731c89a284a 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -108,9 +108,7 @@ func ReadPassword(fd int) ([]byte, error) { return nil, err } - defer func() { - unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) - }() + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) return readPasswordLine(passwordReader(fd)) } diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go index a2e1b57dc14..9e41b9f43f0 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -14,7 +14,7 @@ import ( // State 
contains the state of a terminal. type State struct { - state *unix.Termios + termios unix.Termios } // IsTerminal returns true if the given file descriptor is a terminal. @@ -75,47 +75,43 @@ func ReadPassword(fd int) ([]byte, error) { // restored. // see http://cr.illumos.org/~webrev/andy_js/1060/ func MakeRaw(fd int) (*State, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) if err != nil { return nil, err } - oldTermios := *oldTermiosPtr - - newTermios := oldTermios - newTermios.Iflag &^= syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON - newTermios.Oflag &^= syscall.OPOST - newTermios.Lflag &^= syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN - newTermios.Cflag &^= syscall.CSIZE | syscall.PARENB - newTermios.Cflag |= syscall.CS8 - newTermios.Cc[unix.VMIN] = 1 - newTermios.Cc[unix.VTIME] = 0 - - if err := unix.IoctlSetTermios(fd, unix.TCSETS, &newTermios); err != nil { + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { return nil, err } - return &State{ - state: oldTermiosPtr, - }, nil + return &oldState, nil } // Restore restores the terminal connected to the given file descriptor to a // previous state. func Restore(fd int, oldState *State) error { - return unix.IoctlSetTermios(fd, unix.TCSETS, oldState.state) + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) } // GetState returns the current state of a terminal which may be useful to // restore the terminal after a signal. func GetState(fd int) (*State, error) { - oldTermiosPtr, err := unix.IoctlGetTermios(fd, unix.TCGETS) + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) if err != nil { return nil, err } - return &State{ - state: oldTermiosPtr, - }, nil + return &State{termios: *termios}, nil } // GetSize returns the dimensions of the given terminal. 
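Reviewer note: the pflag bump above adds new flag types (bytesHex, durationSlice, int16) and a ParseErrorsWhitelist option, which cobra wires through via Command.FParseErrWhitelist in the command.go hunk earlier in this diff. Below is a minimal, hypothetical sketch (not part of the vendored code; flag names and values are made up) of how that new surface behaves on a standalone pflag flag set:

package main

import (
	"fmt"
	"time"

	flag "github.com/spf13/pflag"
)

func main() {
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)

	// New flag types from the vendored pflag revision.
	key := fs.BytesHex("key", nil, "hex-encoded key material")
	waits := fs.DurationSlice("wait", []time.Duration{time.Second}, "comma-separated durations")
	port := fs.Int16("port", 8080, "listen port")

	// Ignore unknown flags instead of aborting the parse.
	fs.ParseErrorsWhitelist.UnknownFlags = true

	// "--not-a-flag" would normally be a parse error; with the whitelist it is skipped.
	if err := fs.Parse([]string{"--key=deadbeef", "--wait=1s,500ms", "--not-a-flag"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Printf("key=%x wait=%v port=%d\n", *key, *waits, *port)
}

Cobra exposes the same behaviour through Command.FParseErrWhitelist, which ParseFlags copies onto the flag set just before parsing, as shown in the command.go hunk above.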
diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go index 4933ac36118..8618955df73 100644 --- a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -89,9 +89,7 @@ func ReadPassword(fd int) ([]byte, error) { return nil, err } - defer func() { - windows.SetConsoleMode(windows.Handle(fd), old) - }() + defer windows.SetConsoleMode(windows.Handle(fd), old) var h windows.Handle p, _ := windows.GetCurrentProcess() diff --git a/vendor/golang.org/x/image/tiff/reader.go b/vendor/golang.org/x/image/tiff/reader.go index 0e3d3325f5f..8a941c112fd 100644 --- a/vendor/golang.org/x/image/tiff/reader.go +++ b/vendor/golang.org/x/image/tiff/reader.go @@ -264,6 +264,9 @@ func (d *decoder) decode(dst image.Image, xmin, ymin, xmax, ymax int) error { } img.SetGray16(x, y, color.Gray16{v}) } + if rMaxX == img.Bounds().Max.X { + d.off += 2 * (xmax - img.Bounds().Max.X) + } } } else { img := dst.(*image.Gray) diff --git a/vendor/golang.org/x/net/http/httpguts/guts.go b/vendor/golang.org/x/net/http/httpguts/guts.go new file mode 100644 index 00000000000..8255fd49b42 --- /dev/null +++ b/vendor/golang.org/x/net/http/httpguts/guts.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package httpguts provides functions implementing various details +// of the HTTP specification. +// +// This package is shared by the standard library (which vendors it) +// and x/net/http2. It comes with no API stability promise. +package httpguts + +import ( + "net/textproto" + "strings" +) + +// SniffedContentType reports whether ct is a Content-Type that is known +// to cause client-side content sniffing. +// +// This provides just a partial implementation of mime.ParseMediaType +// with the assumption that the Content-Type is not attacker controlled. +func SniffedContentType(ct string) bool { + if i := strings.Index(ct, ";"); i != -1 { + ct = ct[:i] + } + ct = strings.ToLower(strings.TrimSpace(ct)) + return ct == "text/plain" || ct == "application/octet-stream" || + ct == "application/unknown" || ct == "unknown/unknown" || ct == "*/*" || + !strings.Contains(ct, "/") +} + +// ValidTrailerHeader reports whether name is a valid header field name to appear +// in trailers. 
+// See RFC 7230, Section 4.1.2 +func ValidTrailerHeader(name string) bool { + name = textproto.CanonicalMIMEHeaderKey(name) + if strings.HasPrefix(name, "If-") || badTrailer[name] { + return false + } + return true +} + +var badTrailer = map[string]bool{ + "Authorization": true, + "Cache-Control": true, + "Connection": true, + "Content-Encoding": true, + "Content-Length": true, + "Content-Range": true, + "Content-Type": true, + "Expect": true, + "Host": true, + "Keep-Alive": true, + "Max-Forwards": true, + "Pragma": true, + "Proxy-Authenticate": true, + "Proxy-Authorization": true, + "Proxy-Connection": true, + "Range": true, + "Realm": true, + "Te": true, + "Trailer": true, + "Transfer-Encoding": true, + "Www-Authenticate": true, +} diff --git a/vendor/golang.org/x/net/lex/httplex/httplex.go b/vendor/golang.org/x/net/http/httpguts/httplex.go similarity index 97% rename from vendor/golang.org/x/net/lex/httplex/httplex.go rename to vendor/golang.org/x/net/http/httpguts/httplex.go index 20f2b8940ba..e7de24ee64e 100644 --- a/vendor/golang.org/x/net/lex/httplex/httplex.go +++ b/vendor/golang.org/x/net/http/httpguts/httplex.go @@ -2,12 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package httplex contains rules around lexical matters of various -// HTTP-related specifications. -// -// This package is shared by the standard library (which vendors it) -// and x/net/http2. It comes with no API stability promise. -package httplex +package httpguts import ( "net" diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 3b14890728f..e32500779af 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -14,8 +14,8 @@ import ( "strings" "sync" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" ) const frameHeaderLen = 9 @@ -1462,7 +1462,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if VerboseLogs && fr.logReads { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } - if !httplex.ValidHeaderFieldValue(hf.Value) { + if !httpguts.ValidHeaderFieldValue(hf.Value) { invalid = headerFieldValueError(hf.Value) } isPseudo := strings.HasPrefix(hf.Name, ":") diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 176644acdac..166788ceec5 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -389,6 +389,12 @@ func (d *Decoder) callEmit(hf HeaderField) error { // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseDynamicTableSizeUpdate() error { + // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the + // beginning of the first header block following the change to the dynamic table size. 
+ if d.dynTab.size > 0 { + return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} + } + buf := d.buf size, buf, err := readVarInt(5, buf) if err != nil { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index 71db28a873a..c82428254af 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -29,7 +29,7 @@ import ( "strings" "sync" - "golang.org/x/net/lex/httplex" + "golang.org/x/net/http/httpguts" ) var ( @@ -179,7 +179,7 @@ var ( ) // validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httplex.ValidHeaderName for the base rules. +// name (key). See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: // "Just as in HTTP/1.x, header field names are strings of ASCII @@ -191,7 +191,7 @@ func validWireHeaderFieldName(v string) bool { return false } for _, r := range v { - if !httplex.IsTokenRune(r) { + if !httpguts.IsTokenRune(r) { return false } if 'A' <= r && r <= 'Z' { diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 39ed755a86e..acf3b24186f 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -46,6 +46,7 @@ import ( "sync" "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" ) @@ -1607,7 +1608,10 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return streamError(id, ErrCodeStreamClosed) + // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the + // value of a content-length header field does not equal the sum of the + // DATA frame payload lengths that form the body. + return streamError(id, ErrCodeProtocol) } if f.Length > 0 { // Check whether the client has flow control quota. @@ -1817,7 +1821,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { if st.trailer != nil { for _, hf := range f.RegularFields() { key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { + if !httpguts.ValidTrailerHeader(key) { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. @@ -2284,7 +2288,7 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != // written in the trailers at the end of the response. func (rws *responseWriterState) declareTrailer(k string) { k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { + if !httpguts.ValidTrailerHeader(k) { // Forbidden by RFC 7230, section 4.1.2. 
rws.conn.logf("ignoring invalid trailer %q", k) return @@ -2308,6 +2312,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { isHeadResp := rws.req.Method == "HEAD" if !rws.sentHeader { rws.sentHeader = true + var ctype, clen string if clen = rws.snapHeader.Get("Content-Length"); clen != "" { rws.snapHeader.Del("Content-Length") @@ -2321,10 +2326,33 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if clen == "" && rws.handlerDone && bodyAllowedForStatus(rws.status) && (len(p) > 0 || !isHeadResp) { clen = strconv.Itoa(len(p)) } + _, hasContentType := rws.snapHeader["Content-Type"] if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { - ctype = http.DetectContentType(p) + if cto := rws.snapHeader.Get("X-Content-Type-Options"); strings.EqualFold("nosniff", cto) { + // nosniff is an explicit directive not to guess a content-type. + // Content-sniffing is no less susceptible to polyglot attacks via + // hosted content when done on the server. + ctype = "application/octet-stream" + rws.conn.logf("http2: WriteHeader called with X-Content-Type-Options:nosniff but no Content-Type") + } else { + ctype = http.DetectContentType(p) + } + } + + var noSniff bool + if bodyAllowedForStatus(rws.status) && (rws.sentContentLen > 0 || len(p) > 0) { + // If the content type triggers client-side sniffing on old browsers, + // attach a X-Content-Type-Options header if not present (or explicitly nil). + if _, ok := rws.snapHeader["X-Content-Type-Options"]; !ok { + if hasContentType { + noSniff = httpguts.SniffedContentType(rws.snapHeader.Get("Content-Type")) + } else if ctype != "" { + noSniff = httpguts.SniffedContentType(ctype) + } + } } + var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. @@ -2343,6 +2371,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { endStream: endStream, contentType: ctype, contentLength: clen, + noSniff: noSniff, date: date, }) if err != nil { @@ -2838,41 +2867,6 @@ func new400Handler(err error) http.HandlerFunc { } } -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} - // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // disabled. See comments on h1ServerShutdownChan above for why // the code is written this way. 
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index e6b321f4bb6..d23a226251c 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -27,9 +27,9 @@ import ( "sync" "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" - "golang.org/x/net/lex/httplex" ) const ( @@ -567,6 +567,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) + if t.AllowHTTP { + cc.nextStreamID = 3 + } + if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -951,6 +955,9 @@ func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { for { cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { + if waitingForConn != nil { + close(waitingForConn) + } return errClientConnUnusable } if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { @@ -1174,7 +1181,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if host == "" { host = req.URL.Host } - host, err := httplex.PunycodeHostPort(host) + host, err := httpguts.PunycodeHostPort(host) if err != nil { return nil, err } @@ -1199,11 +1206,11 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) { return nil, fmt.Errorf("invalid HTTP header name %q", k) } for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) } } @@ -2244,7 +2251,7 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body } s.delay = t.expectContinueTimeout() if s.delay == 0 || - !httplex.HeaderValuesContainsToken( + !httpguts.HeaderValuesContainsToken( cs.req.Header["Expect"], "100-continue") { return @@ -2299,5 +2306,5 @@ func (s bodyWriterState) scheduleBodyWrite() { // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") + return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") } diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 54ab4a88e7b..a5120412e67 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -11,8 +11,8 @@ import ( "net/http" "net/url" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" ) // writeFramer is implemented by any type that is used to write frames. 
@@ -186,6 +186,7 @@ type writeResHeaders struct { date string contentType string contentLength string + noSniff bool } func encKV(enc *hpack.Encoder, k, v string) { @@ -222,6 +223,9 @@ func (w *writeResHeaders) writeFrame(ctx writeContext) error { if w.contentLength != "" { encKV(enc, "content-length", w.contentLength) } + if w.noSniff { + encKV(enc, "x-content-type-options", "nosniff") + } if w.date != "" { encKV(enc, "date", w.date) } @@ -350,7 +354,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } isTE := k == "transfer-encoding" for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { // TODO: return an error? golang.org/issue/14048 // For now just omit it. continue diff --git a/vendor/golang.org/x/net/internal/timeseries/timeseries.go b/vendor/golang.org/x/net/internal/timeseries/timeseries.go new file mode 100644 index 00000000000..685f0e7ea23 --- /dev/null +++ b/vendor/golang.org/x/net/internal/timeseries/timeseries.go @@ -0,0 +1,525 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package timeseries implements a time series structure for stats collection. +package timeseries // import "golang.org/x/net/internal/timeseries" + +import ( + "fmt" + "log" + "time" +) + +const ( + timeSeriesNumBuckets = 64 + minuteHourSeriesNumBuckets = 60 +) + +var timeSeriesResolutions = []time.Duration{ + 1 * time.Second, + 10 * time.Second, + 1 * time.Minute, + 10 * time.Minute, + 1 * time.Hour, + 6 * time.Hour, + 24 * time.Hour, // 1 day + 7 * 24 * time.Hour, // 1 week + 4 * 7 * 24 * time.Hour, // 4 weeks + 16 * 7 * 24 * time.Hour, // 16 weeks +} + +var minuteHourSeriesResolutions = []time.Duration{ + 1 * time.Second, + 1 * time.Minute, +} + +// An Observable is a kind of data that can be aggregated in a time series. +type Observable interface { + Multiply(ratio float64) // Multiplies the data in self by a given ratio + Add(other Observable) // Adds the data from a different observation to self + Clear() // Clears the observation so it can be reused. + CopyFrom(other Observable) // Copies the contents of a given observation to self +} + +// Float attaches the methods of Observable to a float64. +type Float float64 + +// NewFloat returns a Float. +func NewFloat() Observable { + f := Float(0) + return &f +} + +// String returns the float as a string. +func (f *Float) String() string { return fmt.Sprintf("%g", f.Value()) } + +// Value returns the float's value. +func (f *Float) Value() float64 { return float64(*f) } + +func (f *Float) Multiply(ratio float64) { *f *= Float(ratio) } + +func (f *Float) Add(other Observable) { + o := other.(*Float) + *f += *o +} + +func (f *Float) Clear() { *f = 0 } + +func (f *Float) CopyFrom(other Observable) { + o := other.(*Float) + *f = *o +} + +// A Clock tells the current time. +type Clock interface { + Time() time.Time +} + +type defaultClock int + +var defaultClockInstance defaultClock + +func (defaultClock) Time() time.Time { return time.Now() } + +// Information kept per level. Each level consists of a circular list of +// observations. The start of the level may be derived from end and the +// len(buckets) * sizeInMillis. 
+type tsLevel struct { + oldest int // index to oldest bucketed Observable + newest int // index to newest bucketed Observable + end time.Time // end timestamp for this level + size time.Duration // duration of the bucketed Observable + buckets []Observable // collections of observations + provider func() Observable // used for creating new Observable +} + +func (l *tsLevel) Clear() { + l.oldest = 0 + l.newest = len(l.buckets) - 1 + l.end = time.Time{} + for i := range l.buckets { + if l.buckets[i] != nil { + l.buckets[i].Clear() + l.buckets[i] = nil + } + } +} + +func (l *tsLevel) InitLevel(size time.Duration, numBuckets int, f func() Observable) { + l.size = size + l.provider = f + l.buckets = make([]Observable, numBuckets) +} + +// Keeps a sequence of levels. Each level is responsible for storing data at +// a given resolution. For example, the first level stores data at a one +// minute resolution while the second level stores data at a one hour +// resolution. + +// Each level is represented by a sequence of buckets. Each bucket spans an +// interval equal to the resolution of the level. New observations are added +// to the last bucket. +type timeSeries struct { + provider func() Observable // make more Observable + numBuckets int // number of buckets in each level + levels []*tsLevel // levels of bucketed Observable + lastAdd time.Time // time of last Observable tracked + total Observable // convenient aggregation of all Observable + clock Clock // Clock for getting current time + pending Observable // observations not yet bucketed + pendingTime time.Time // what time are we keeping in pending + dirty bool // if there are pending observations +} + +// init initializes a level according to the supplied criteria. +func (ts *timeSeries) init(resolutions []time.Duration, f func() Observable, numBuckets int, clock Clock) { + ts.provider = f + ts.numBuckets = numBuckets + ts.clock = clock + ts.levels = make([]*tsLevel, len(resolutions)) + + for i := range resolutions { + if i > 0 && resolutions[i-1] >= resolutions[i] { + log.Print("timeseries: resolutions must be monotonically increasing") + break + } + newLevel := new(tsLevel) + newLevel.InitLevel(resolutions[i], ts.numBuckets, ts.provider) + ts.levels[i] = newLevel + } + + ts.Clear() +} + +// Clear removes all observations from the time series. +func (ts *timeSeries) Clear() { + ts.lastAdd = time.Time{} + ts.total = ts.resetObservation(ts.total) + ts.pending = ts.resetObservation(ts.pending) + ts.pendingTime = time.Time{} + ts.dirty = false + + for i := range ts.levels { + ts.levels[i].Clear() + } +} + +// Add records an observation at the current time. +func (ts *timeSeries) Add(observation Observable) { + ts.AddWithTime(observation, ts.clock.Time()) +} + +// AddWithTime records an observation at the specified time. +func (ts *timeSeries) AddWithTime(observation Observable, t time.Time) { + + smallBucketDuration := ts.levels[0].size + + if t.After(ts.lastAdd) { + ts.lastAdd = t + } + + if t.After(ts.pendingTime) { + ts.advance(t) + ts.mergePendingUpdates() + ts.pendingTime = ts.levels[0].end + ts.pending.CopyFrom(observation) + ts.dirty = true + } else if t.After(ts.pendingTime.Add(-1 * smallBucketDuration)) { + // The observation is close enough to go into the pending bucket. + // This compensates for clock skewing and small scheduling delays + // by letting the update stay in the fast path. 
+ ts.pending.Add(observation) + ts.dirty = true + } else { + ts.mergeValue(observation, t) + } +} + +// mergeValue inserts the observation at the specified time in the past into all levels. +func (ts *timeSeries) mergeValue(observation Observable, t time.Time) { + for _, level := range ts.levels { + index := (ts.numBuckets - 1) - int(level.end.Sub(t)/level.size) + if 0 <= index && index < ts.numBuckets { + bucketNumber := (level.oldest + index) % ts.numBuckets + if level.buckets[bucketNumber] == nil { + level.buckets[bucketNumber] = level.provider() + } + level.buckets[bucketNumber].Add(observation) + } + } + ts.total.Add(observation) +} + +// mergePendingUpdates applies the pending updates into all levels. +func (ts *timeSeries) mergePendingUpdates() { + if ts.dirty { + ts.mergeValue(ts.pending, ts.pendingTime) + ts.pending = ts.resetObservation(ts.pending) + ts.dirty = false + } +} + +// advance cycles the buckets at each level until the latest bucket in +// each level can hold the time specified. +func (ts *timeSeries) advance(t time.Time) { + if !t.After(ts.levels[0].end) { + return + } + for i := 0; i < len(ts.levels); i++ { + level := ts.levels[i] + if !level.end.Before(t) { + break + } + + // If the time is sufficiently far, just clear the level and advance + // directly. + if !t.Before(level.end.Add(level.size * time.Duration(ts.numBuckets))) { + for _, b := range level.buckets { + ts.resetObservation(b) + } + level.end = time.Unix(0, (t.UnixNano()/level.size.Nanoseconds())*level.size.Nanoseconds()) + } + + for t.After(level.end) { + level.end = level.end.Add(level.size) + level.newest = level.oldest + level.oldest = (level.oldest + 1) % ts.numBuckets + ts.resetObservation(level.buckets[level.newest]) + } + + t = level.end + } +} + +// Latest returns the sum of the num latest buckets from the level. +func (ts *timeSeries) Latest(level, num int) Observable { + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + result := ts.provider() + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + if l.buckets[index] != nil { + result.Add(l.buckets[index]) + } + if index == 0 { + index = ts.numBuckets + } + index-- + } + + return result +} + +// LatestBuckets returns a copy of the num latest buckets from level. +func (ts *timeSeries) LatestBuckets(level, num int) []Observable { + if level < 0 || level > len(ts.levels) { + log.Print("timeseries: bad level argument: ", level) + return nil + } + if num < 0 || num >= ts.numBuckets { + log.Print("timeseries: bad num argument: ", num) + return nil + } + + results := make([]Observable, num) + now := ts.clock.Time() + if ts.levels[0].end.Before(now) { + ts.advance(now) + } + + ts.mergePendingUpdates() + + l := ts.levels[level] + index := l.newest + + for i := 0; i < num; i++ { + result := ts.provider() + results[i] = result + if l.buckets[index] != nil { + result.CopyFrom(l.buckets[index]) + } + + if index == 0 { + index = ts.numBuckets + } + index -= 1 + } + return results +} + +// ScaleBy updates observations by scaling by factor. +func (ts *timeSeries) ScaleBy(factor float64) { + for _, l := range ts.levels { + for i := 0; i < ts.numBuckets; i++ { + l.buckets[i].Multiply(factor) + } + } + + ts.total.Multiply(factor) + ts.pending.Multiply(factor) +} + +// Range returns the sum of observations added over the specified time range. +// If start or finish times don't fall on bucket boundaries of the same +// level, then return values are approximate answers. 
+func (ts *timeSeries) Range(start, finish time.Time) Observable { + return ts.ComputeRange(start, finish, 1)[0] +} + +// Recent returns the sum of observations from the last delta. +func (ts *timeSeries) Recent(delta time.Duration) Observable { + now := ts.clock.Time() + return ts.Range(now.Add(-delta), now) +} + +// Total returns the total of all observations. +func (ts *timeSeries) Total() Observable { + ts.mergePendingUpdates() + return ts.total +} + +// ComputeRange computes a specified number of values into a slice using +// the observations recorded over the specified time period. The return +// values are approximate if the start or finish times don't fall on the +// bucket boundaries at the same level or if the number of buckets spanning +// the range is not an integral multiple of num. +func (ts *timeSeries) ComputeRange(start, finish time.Time, num int) []Observable { + if start.After(finish) { + log.Printf("timeseries: start > finish, %v>%v", start, finish) + return nil + } + + if num < 0 { + log.Printf("timeseries: num < 0, %v", num) + return nil + } + + results := make([]Observable, num) + + for _, l := range ts.levels { + if !start.Before(l.end.Add(-l.size * time.Duration(ts.numBuckets))) { + ts.extract(l, start, finish, num, results) + return results + } + } + + // Failed to find a level that covers the desired range. So just + // extract from the last level, even if it doesn't cover the entire + // desired range. + ts.extract(ts.levels[len(ts.levels)-1], start, finish, num, results) + + return results +} + +// RecentList returns the specified number of values in slice over the most +// recent time period of the specified range. +func (ts *timeSeries) RecentList(delta time.Duration, num int) []Observable { + if delta < 0 { + return nil + } + now := ts.clock.Time() + return ts.ComputeRange(now.Add(-delta), now, num) +} + +// extract returns a slice of specified number of observations from a given +// level over a given range. +func (ts *timeSeries) extract(l *tsLevel, start, finish time.Time, num int, results []Observable) { + ts.mergePendingUpdates() + + srcInterval := l.size + dstInterval := finish.Sub(start) / time.Duration(num) + dstStart := start + srcStart := l.end.Add(-srcInterval * time.Duration(ts.numBuckets)) + + srcIndex := 0 + + // Where should scanning start? + if dstStart.After(srcStart) { + advance := dstStart.Sub(srcStart) / srcInterval + srcIndex += int(advance) + srcStart = srcStart.Add(advance * srcInterval) + } + + // The i'th value is computed as show below. + // interval = (finish/start)/num + // i'th value = sum of observation in range + // [ start + i * interval, + // start + (i + 1) * interval ) + for i := 0; i < num; i++ { + results[i] = ts.resetObservation(results[i]) + dstEnd := dstStart.Add(dstInterval) + for srcIndex < ts.numBuckets && srcStart.Before(dstEnd) { + srcEnd := srcStart.Add(srcInterval) + if srcEnd.After(ts.lastAdd) { + srcEnd = ts.lastAdd + } + + if !srcEnd.Before(dstStart) { + srcValue := l.buckets[(srcIndex+l.oldest)%ts.numBuckets] + if !srcStart.Before(dstStart) && !srcEnd.After(dstEnd) { + // dst completely contains src. + if srcValue != nil { + results[i].Add(srcValue) + } + } else { + // dst partially overlaps src. 
+ overlapStart := maxTime(srcStart, dstStart) + overlapEnd := minTime(srcEnd, dstEnd) + base := srcEnd.Sub(srcStart) + fraction := overlapEnd.Sub(overlapStart).Seconds() / base.Seconds() + + used := ts.provider() + if srcValue != nil { + used.CopyFrom(srcValue) + } + used.Multiply(fraction) + results[i].Add(used) + } + + if srcEnd.After(dstEnd) { + break + } + } + srcIndex++ + srcStart = srcStart.Add(srcInterval) + } + dstStart = dstStart.Add(dstInterval) + } +} + +// resetObservation clears the content so the struct may be reused. +func (ts *timeSeries) resetObservation(observation Observable) Observable { + if observation == nil { + observation = ts.provider() + } else { + observation.Clear() + } + return observation +} + +// TimeSeries tracks data at granularities from 1 second to 16 weeks. +type TimeSeries struct { + timeSeries +} + +// NewTimeSeries creates a new TimeSeries using the function provided for creating new Observable. +func NewTimeSeries(f func() Observable) *TimeSeries { + return NewTimeSeriesWithClock(f, defaultClockInstance) +} + +// NewTimeSeriesWithClock creates a new TimeSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewTimeSeriesWithClock(f func() Observable, clock Clock) *TimeSeries { + ts := new(TimeSeries) + ts.timeSeries.init(timeSeriesResolutions, f, timeSeriesNumBuckets, clock) + return ts +} + +// MinuteHourSeries tracks data at granularities of 1 minute and 1 hour. +type MinuteHourSeries struct { + timeSeries +} + +// NewMinuteHourSeries creates a new MinuteHourSeries using the function provided for creating new Observable. +func NewMinuteHourSeries(f func() Observable) *MinuteHourSeries { + return NewMinuteHourSeriesWithClock(f, defaultClockInstance) +} + +// NewMinuteHourSeriesWithClock creates a new MinuteHourSeries using the function provided for creating new Observable and the clock for +// assigning timestamps. +func NewMinuteHourSeriesWithClock(f func() Observable, clock Clock) *MinuteHourSeries { + ts := new(MinuteHourSeries) + ts.timeSeries.init(minuteHourSeriesResolutions, f, + minuteHourSeriesNumBuckets, clock) + return ts +} + +func (ts *MinuteHourSeries) Minute() Observable { + return ts.timeSeries.Latest(0, 60) +} + +func (ts *MinuteHourSeries) Hour() Observable { + return ts.timeSeries.Latest(1, 60) +} + +func minTime(a, b time.Time) time.Time { + if a.Before(b) { + return a + } + return b +} + +func maxTime(a, b time.Time) time.Time { + if a.After(b) { + return a + } + return b +} diff --git a/vendor/golang.org/x/net/trace/events.go b/vendor/golang.org/x/net/trace/events.go new file mode 100644 index 00000000000..c646a6952e5 --- /dev/null +++ b/vendor/golang.org/x/net/trace/events.go @@ -0,0 +1,532 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net/http" + "runtime" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "text/tabwriter" + "time" +) + +const maxEventsPerLog = 100 + +type bucket struct { + MaxErrAge time.Duration + String string +} + +var buckets = []bucket{ + {0, "total"}, + {10 * time.Second, "errs<10s"}, + {1 * time.Minute, "errs<1m"}, + {10 * time.Minute, "errs<10m"}, + {1 * time.Hour, "errs<1h"}, + {10 * time.Hour, "errs<10h"}, + {24000 * time.Hour, "errors"}, +} + +// RenderEvents renders the HTML page typically served at /debug/events. 
+// It does not do any auth checking. The request may be nil. +// +// Most users will use the Events handler. +func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) { + now := time.Now() + data := &struct { + Families []string // family names + Buckets []bucket + Counts [][]int // eventLog count per family/bucket + + // Set when a bucket has been selected. + Family string + Bucket int + EventLogs eventLogs + Expanded bool + }{ + Buckets: buckets, + } + + data.Families = make([]string, 0, len(families)) + famMu.RLock() + for name := range families { + data.Families = append(data.Families, name) + } + famMu.RUnlock() + sort.Strings(data.Families) + + // Count the number of eventLogs in each family for each error age. + data.Counts = make([][]int, len(data.Families)) + for i, name := range data.Families { + // TODO(sameer): move this loop under the family lock. + f := getEventFamily(name) + data.Counts[i] = make([]int, len(data.Buckets)) + for j, b := range data.Buckets { + data.Counts[i][j] = f.Count(now, b.MaxErrAge) + } + } + + if req != nil { + var ok bool + data.Family, data.Bucket, ok = parseEventsArgs(req) + if !ok { + // No-op + } else { + data.EventLogs = getEventFamily(data.Family).Copy(now, buckets[data.Bucket].MaxErrAge) + } + if data.EventLogs != nil { + defer data.EventLogs.Free() + sort.Sort(data.EventLogs) + } + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + } + + famMu.RLock() + defer famMu.RUnlock() + if err := eventsTmpl().Execute(w, data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseEventsArgs(req *http.Request) (fam string, b int, ok bool) { + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < 0 || b >= len(buckets) { + return "", 0, false + } + return fam, b, true +} + +// An EventLog provides a log of events associated with a specific object. +type EventLog interface { + // Printf formats its arguments with fmt.Sprintf and adds the + // result to the event log. + Printf(format string, a ...interface{}) + + // Errorf is like Printf, but it marks this event as an error. + Errorf(format string, a ...interface{}) + + // Finish declares that this event log is complete. + // The event log should not be used after calling this method. + Finish() +} + +// NewEventLog returns a new EventLog with the specified family name +// and title. 
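A short sketch of how a long-lived object might hold one of these logs, mirroring the Fetcher example in this package's doc comment; worker and doStep are placeholders rather than anything in the vendored code:

type worker struct {
	events EventLog
}

func newWorker(name string) *worker {
	return &worker{events: NewEventLog("mypkg.Worker", name)}
}

func (w *worker) step() {
	if err := doStep(); err != nil { // doStep stands in for real work
		w.events.Errorf("step failed: %v", err)
		return
	}
	w.events.Printf("step succeeded")
}

func (w *worker) close() { w.events.Finish() }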
+func NewEventLog(family, title string) EventLog { + el := newEventLog() + el.ref() + el.Family, el.Title = family, title + el.Start = time.Now() + el.events = make([]logEntry, 0, maxEventsPerLog) + el.stack = make([]uintptr, 32) + n := runtime.Callers(2, el.stack) + el.stack = el.stack[:n] + + getEventFamily(family).add(el) + return el +} + +func (el *eventLog) Finish() { + getEventFamily(el.Family).remove(el) + el.unref() // matches ref in New +} + +var ( + famMu sync.RWMutex + families = make(map[string]*eventFamily) // family name => family +) + +func getEventFamily(fam string) *eventFamily { + famMu.Lock() + defer famMu.Unlock() + f := families[fam] + if f == nil { + f = &eventFamily{} + families[fam] = f + } + return f +} + +type eventFamily struct { + mu sync.RWMutex + eventLogs eventLogs +} + +func (f *eventFamily) add(el *eventLog) { + f.mu.Lock() + f.eventLogs = append(f.eventLogs, el) + f.mu.Unlock() +} + +func (f *eventFamily) remove(el *eventLog) { + f.mu.Lock() + defer f.mu.Unlock() + for i, el0 := range f.eventLogs { + if el == el0 { + copy(f.eventLogs[i:], f.eventLogs[i+1:]) + f.eventLogs = f.eventLogs[:len(f.eventLogs)-1] + return + } + } +} + +func (f *eventFamily) Count(now time.Time, maxErrAge time.Duration) (n int) { + f.mu.RLock() + defer f.mu.RUnlock() + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + n++ + } + } + return +} + +func (f *eventFamily) Copy(now time.Time, maxErrAge time.Duration) (els eventLogs) { + f.mu.RLock() + defer f.mu.RUnlock() + els = make(eventLogs, 0, len(f.eventLogs)) + for _, el := range f.eventLogs { + if el.hasRecentError(now, maxErrAge) { + el.ref() + els = append(els, el) + } + } + return +} + +type eventLogs []*eventLog + +// Free calls unref on each element of the list. +func (els eventLogs) Free() { + for _, el := range els { + el.unref() + } +} + +// eventLogs may be sorted in reverse chronological order. +func (els eventLogs) Len() int { return len(els) } +func (els eventLogs) Less(i, j int) bool { return els[i].Start.After(els[j].Start) } +func (els eventLogs) Swap(i, j int) { els[i], els[j] = els[j], els[i] } + +// A logEntry is a timestamped log entry in an event log. +type logEntry struct { + When time.Time + Elapsed time.Duration // since previous event in log + NewDay bool // whether this event is on a different day to the previous event + What string + IsErr bool +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e logEntry) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// An eventLog represents an active event log. +type eventLog struct { + // Family is the top-level grouping of event logs to which this belongs. + Family string + + // Title is the title of this event log. + Title string + + // Timing information. + Start time.Time + + // Call stack where this event log was created. + stack []uintptr + + // Append-only sequence of events. + // + // TODO(sameer): change this to a ring buffer to avoid the array copy + // when we hit maxEventsPerLog. + mu sync.RWMutex + events []logEntry + LastErrorTime time.Time + discarded int + + refs int32 // how many buckets this is in +} + +func (el *eventLog) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ el.Family = "" + el.Title = "" + el.Start = time.Time{} + el.stack = nil + el.events = nil + el.LastErrorTime = time.Time{} + el.discarded = 0 + el.refs = 0 +} + +func (el *eventLog) hasRecentError(now time.Time, maxErrAge time.Duration) bool { + if maxErrAge == 0 { + return true + } + el.mu.RLock() + defer el.mu.RUnlock() + return now.Sub(el.LastErrorTime) < maxErrAge +} + +// delta returns the elapsed time since the last event or the log start, +// and whether it spans midnight. +// L >= el.mu +func (el *eventLog) delta(t time.Time) (time.Duration, bool) { + if len(el.events) == 0 { + return t.Sub(el.Start), false + } + prev := el.events[len(el.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() + +} + +func (el *eventLog) Printf(format string, a ...interface{}) { + el.printf(false, format, a...) +} + +func (el *eventLog) Errorf(format string, a ...interface{}) { + el.printf(true, format, a...) +} + +func (el *eventLog) printf(isErr bool, format string, a ...interface{}) { + e := logEntry{When: time.Now(), IsErr: isErr, What: fmt.Sprintf(format, a...)} + el.mu.Lock() + e.Elapsed, e.NewDay = el.delta(e.When) + if len(el.events) < maxEventsPerLog { + el.events = append(el.events, e) + } else { + // Discard the oldest event. + if el.discarded == 0 { + // el.discarded starts at two to count for the event it + // is replacing, plus the next one that we are about to + // drop. + el.discarded = 2 + } else { + el.discarded++ + } + // TODO(sameer): if this causes allocations on a critical path, + // change eventLog.What to be a fmt.Stringer, as in trace.go. + el.events[0].What = fmt.Sprintf("(%d events discarded)", el.discarded) + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + el.events[0].When = el.events[1].When + copy(el.events[1:], el.events[2:]) + el.events[maxEventsPerLog-1] = e + } + if e.IsErr { + el.LastErrorTime = e.When + } + el.mu.Unlock() +} + +func (el *eventLog) ref() { + atomic.AddInt32(&el.refs, 1) +} + +func (el *eventLog) unref() { + if atomic.AddInt32(&el.refs, -1) == 0 { + freeEventLog(el) + } +} + +func (el *eventLog) When() string { + return el.Start.Format("2006/01/02 15:04:05.000000") +} + +func (el *eventLog) ElapsedTime() string { + elapsed := time.Since(el.Start) + return fmt.Sprintf("%.6f", elapsed.Seconds()) +} + +func (el *eventLog) Stack() string { + buf := new(bytes.Buffer) + tw := tabwriter.NewWriter(buf, 1, 8, 1, '\t', 0) + printStackRecord(tw, el.stack) + tw.Flush() + return buf.String() +} + +// printStackRecord prints the function + source line information +// for a single stack trace. +// Adapted from runtime/pprof/pprof.go. +func printStackRecord(w io.Writer, stk []uintptr) { + for _, pc := range stk { + f := runtime.FuncForPC(pc) + if f == nil { + continue + } + file, line := f.FileLine(pc) + name := f.Name() + // Hide runtime.goexit and any runtime functions at the beginning. + if strings.HasPrefix(name, "runtime.") { + continue + } + fmt.Fprintf(w, "# %s\t%s:%d\n", name, file, line) + } +} + +func (el *eventLog) Events() []logEntry { + el.mu.RLock() + defer el.mu.RUnlock() + return el.events +} + +// freeEventLogs is a freelist of *eventLog +var freeEventLogs = make(chan *eventLog, 1000) + +// newEventLog returns a event log ready to use. +func newEventLog() *eventLog { + select { + case el := <-freeEventLogs: + return el + default: + return new(eventLog) + } +} + +// freeEventLog adds el to freeEventLogs if there's room. +// This is non-blocking. 
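The discard logic in printf above has a simple observable effect; a rough illustration (the family and title strings are placeholders):

func demoDiscard() {
	el := NewEventLog("mypkg.Demo", "discard behaviour")
	defer el.Finish()
	// With maxEventsPerLog == 100, the oldest 51 of these 150 entries collapse
	// into a single "(51 events discarded)" meta-entry; the 99 most recent
	// entries are kept verbatim.
	for i := 0; i < 150; i++ {
		el.Printf("entry %d", i)
	}
}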
+func freeEventLog(el *eventLog) { + el.reset() + select { + case freeEventLogs <- el: + default: + } +} + +var eventsTmplCache *template.Template +var eventsTmplOnce sync.Once + +func eventsTmpl() *template.Template { + eventsTmplOnce.Do(func() { + eventsTmplCache = template.Must(template.New("events").Funcs(template.FuncMap{ + "elapsed": elapsed, + "trimSpace": strings.TrimSpace, + }).Parse(eventsHTML)) + }) + return eventsTmplCache +} + +const eventsHTML = ` + + + events + + + + +

/debug/events

+ + + {{range $i, $fam := .Families}} + + + + {{range $j, $bucket := $.Buckets}} + {{$n := index $.Counts $i $j}} + + {{end}} + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} {{$bucket.String}}] + {{if $n}}{{end}} +
+ +{{if $.EventLogs}} +
+

Family: {{$.Family}}

+ +{{if $.Expanded}}{{end}} +[Summary]{{if $.Expanded}}{{end}} + +{{if not $.Expanded}}{{end}} +[Expanded]{{if not $.Expanded}}{{end}} + + + + {{range $el := $.EventLogs}} + + + + + {{if $.Expanded}} + + + + + + {{range $el.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
WhenElapsed
{{$el.When}}{{$el.ElapsedTime}}{{$el.Title}} +
{{$el.Stack|trimSpace}}
{{.WhenString}}{{elapsed .Elapsed}}.{{if .IsErr}}E{{else}}.{{end}}. {{.What}}
+{{end}} + + +` diff --git a/vendor/golang.org/x/net/trace/histogram.go b/vendor/golang.org/x/net/trace/histogram.go new file mode 100644 index 00000000000..9bf4286c794 --- /dev/null +++ b/vendor/golang.org/x/net/trace/histogram.go @@ -0,0 +1,365 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package trace + +// This file implements histogramming for RPC statistics collection. + +import ( + "bytes" + "fmt" + "html/template" + "log" + "math" + "sync" + + "golang.org/x/net/internal/timeseries" +) + +const ( + bucketCount = 38 +) + +// histogram keeps counts of values in buckets that are spaced +// out in powers of 2: 0-1, 2-3, 4-7... +// histogram implements timeseries.Observable +type histogram struct { + sum int64 // running total of measurements + sumOfSquares float64 // square of running total + buckets []int64 // bucketed values for histogram + value int // holds a single value as an optimization + valueCount int64 // number of values recorded for single value +} + +// AddMeasurement records a value measurement observation to the histogram. +func (h *histogram) addMeasurement(value int64) { + // TODO: assert invariant + h.sum += value + h.sumOfSquares += float64(value) * float64(value) + + bucketIndex := getBucket(value) + + if h.valueCount == 0 || (h.valueCount > 0 && h.value == bucketIndex) { + h.value = bucketIndex + h.valueCount++ + } else { + h.allocateBuckets() + h.buckets[bucketIndex]++ + } +} + +func (h *histogram) allocateBuckets() { + if h.buckets == nil { + h.buckets = make([]int64, bucketCount) + h.buckets[h.value] = h.valueCount + h.value = 0 + h.valueCount = -1 + } +} + +func log2(i int64) int { + n := 0 + for ; i >= 0x100; i >>= 8 { + n += 8 + } + for ; i > 0; i >>= 1 { + n += 1 + } + return n +} + +func getBucket(i int64) (index int) { + index = log2(i) - 1 + if index < 0 { + index = 0 + } + if index >= bucketCount { + index = bucketCount - 1 + } + return +} + +// Total returns the number of recorded observations. +func (h *histogram) total() (total int64) { + if h.valueCount >= 0 { + total = h.valueCount + } + for _, val := range h.buckets { + total += int64(val) + } + return +} + +// Average returns the average value of recorded observations. +func (h *histogram) average() float64 { + t := h.total() + if t == 0 { + return 0 + } + return float64(h.sum) / float64(t) +} + +// Variance returns the variance of recorded observations. +func (h *histogram) variance() float64 { + t := float64(h.total()) + if t == 0 { + return 0 + } + s := float64(h.sum) / t + return h.sumOfSquares/t - s*s +} + +// StandardDeviation returns the standard deviation of recorded observations. +func (h *histogram) standardDeviation() float64 { + return math.Sqrt(h.variance()) +} + +// PercentileBoundary estimates the value that the given fraction of recorded +// observations are less than. +func (h *histogram) percentileBoundary(percentile float64) int64 { + total := h.total() + + // Corner cases (make sure result is strictly less than Total()) + if total == 0 { + return 0 + } else if total == 1 { + return int64(h.average()) + } + + percentOfTotal := round(float64(total) * percentile) + var runningTotal int64 + + for i := range h.buckets { + value := h.buckets[i] + runningTotal += value + if runningTotal == percentOfTotal { + // We hit an exact bucket boundary. If the next bucket has data, it is a + // good estimate of the value. 
If the bucket is empty, we interpolate the + // midpoint between the next bucket's boundary and the next non-zero + // bucket. If the remaining buckets are all empty, then we use the + // boundary for the next bucket as the estimate. + j := uint8(i + 1) + min := bucketBoundary(j) + if runningTotal < total { + for h.buckets[j] == 0 { + j++ + } + } + max := bucketBoundary(j) + return min + round(float64(max-min)/2) + } else if runningTotal > percentOfTotal { + // The value is in this bucket. Interpolate the value. + delta := runningTotal - percentOfTotal + percentBucket := float64(value-delta) / float64(value) + bucketMin := bucketBoundary(uint8(i)) + nextBucketMin := bucketBoundary(uint8(i + 1)) + bucketSize := nextBucketMin - bucketMin + return bucketMin + round(percentBucket*float64(bucketSize)) + } + } + return bucketBoundary(bucketCount - 1) +} + +// Median returns the estimated median of the observed values. +func (h *histogram) median() int64 { + return h.percentileBoundary(0.5) +} + +// Add adds other to h. +func (h *histogram) Add(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == 0 { + // Other histogram is empty + } else if h.valueCount >= 0 && o.valueCount > 0 && h.value == o.value { + // Both have a single bucketed value, aggregate them + h.valueCount += o.valueCount + } else { + // Two different values necessitate buckets in this histogram + h.allocateBuckets() + if o.valueCount >= 0 { + h.buckets[o.value] += o.valueCount + } else { + for i := range h.buckets { + h.buckets[i] += o.buckets[i] + } + } + } + h.sumOfSquares += o.sumOfSquares + h.sum += o.sum +} + +// Clear resets the histogram to an empty state, removing all observed values. +func (h *histogram) Clear() { + h.buckets = nil + h.value = 0 + h.valueCount = 0 + h.sum = 0 + h.sumOfSquares = 0 +} + +// CopyFrom copies from other, which must be a *histogram, into h. +func (h *histogram) CopyFrom(other timeseries.Observable) { + o := other.(*histogram) + if o.valueCount == -1 { + h.allocateBuckets() + copy(h.buckets, o.buckets) + } + h.sum = o.sum + h.sumOfSquares = o.sumOfSquares + h.value = o.value + h.valueCount = o.valueCount +} + +// Multiply scales the histogram by the specified ratio. +func (h *histogram) Multiply(ratio float64) { + if h.valueCount == -1 { + for i := range h.buckets { + h.buckets[i] = int64(float64(h.buckets[i]) * ratio) + } + } else { + h.valueCount = int64(float64(h.valueCount) * ratio) + } + h.sum = int64(float64(h.sum) * ratio) + h.sumOfSquares = h.sumOfSquares * ratio +} + +// New creates a new histogram. +func (h *histogram) New() timeseries.Observable { + r := new(histogram) + r.Clear() + return r +} + +func (h *histogram) String() string { + return fmt.Sprintf("%d, %f, %d, %d, %v", + h.sum, h.sumOfSquares, h.value, h.valueCount, h.buckets) +} + +// round returns the closest int64 to the argument +func round(in float64) int64 { + return int64(math.Floor(in + 0.5)) +} + +// bucketBoundary returns the first value in the bucket. +func bucketBoundary(bucket uint8) int64 { + if bucket == 0 { + return 0 + } + return 1 << bucket +} + +// bucketData holds data about a specific bucket for use in distTmpl. +type bucketData struct { + Lower, Upper int64 + N int64 + Pct, CumulativePct float64 + GraphWidth int +} + +// data holds data about a Distribution for use in distTmpl. +type data struct { + Buckets []*bucketData + Count, Median int64 + Mean, StandardDeviation float64 +} + +// maxHTMLBarWidth is the maximum width of the HTML bar for visualizing buckets. 
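To make the power-of-two bucketing concrete, here is a small sketch using the getBucket and bucketBoundary helpers defined in this file (the values in the comment follow from the log2-based index):

func demoBuckets() {
	// Buckets cover [0,2), [2,4), [4,8), ...; for example 100 lands in
	// bucket 6, whose lower boundary is 64.
	for _, v := range []int64{0, 1, 2, 3, 4, 100, 1000} {
		b := getBucket(v)
		fmt.Printf("value=%4d bucket=%2d lower=%d\n", v, b, bucketBoundary(uint8(b)))
	}
}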
+const maxHTMLBarWidth = 350.0 + +// newData returns data representing h for use in distTmpl. +func (h *histogram) newData() *data { + // Force the allocation of buckets to simplify the rendering implementation + h.allocateBuckets() + // We scale the bars on the right so that the largest bar is + // maxHTMLBarWidth pixels in width. + maxBucket := int64(0) + for _, n := range h.buckets { + if n > maxBucket { + maxBucket = n + } + } + total := h.total() + barsizeMult := maxHTMLBarWidth / float64(maxBucket) + var pctMult float64 + if total == 0 { + pctMult = 1.0 + } else { + pctMult = 100.0 / float64(total) + } + + buckets := make([]*bucketData, len(h.buckets)) + runningTotal := int64(0) + for i, n := range h.buckets { + if n == 0 { + continue + } + runningTotal += n + var upperBound int64 + if i < bucketCount-1 { + upperBound = bucketBoundary(uint8(i + 1)) + } else { + upperBound = math.MaxInt64 + } + buckets[i] = &bucketData{ + Lower: bucketBoundary(uint8(i)), + Upper: upperBound, + N: n, + Pct: float64(n) * pctMult, + CumulativePct: float64(runningTotal) * pctMult, + GraphWidth: int(float64(n) * barsizeMult), + } + } + return &data{ + Buckets: buckets, + Count: total, + Median: h.median(), + Mean: h.average(), + StandardDeviation: h.standardDeviation(), + } +} + +func (h *histogram) html() template.HTML { + buf := new(bytes.Buffer) + if err := distTmpl().Execute(buf, h.newData()); err != nil { + buf.Reset() + log.Printf("net/trace: couldn't execute template: %v", err) + } + return template.HTML(buf.String()) +} + +var distTmplCache *template.Template +var distTmplOnce sync.Once + +func distTmpl() *template.Template { + distTmplOnce.Do(func() { + // Input: data + distTmplCache = template.Must(template.New("distTmpl").Parse(` + + + + + + + +
Count: {{.Count}}Mean: {{printf "%.0f" .Mean}}StdDev: {{printf "%.0f" .StandardDeviation}}Median: {{.Median}}
+
+ +{{range $b := .Buckets}} +{{if $b}} + + + + + + + + + +{{end}} +{{end}} +
[{{.Lower}},{{.Upper}}){{.N}}{{printf "%#.3f" .Pct}}%{{printf "%#.3f" .CumulativePct}}%
+`)) + }) + return distTmplCache +} diff --git a/vendor/golang.org/x/net/trace/trace.go b/vendor/golang.org/x/net/trace/trace.go new file mode 100644 index 00000000000..a46ee0eaa31 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace.go @@ -0,0 +1,1103 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package trace implements tracing of requests and long-lived objects. +It exports HTTP interfaces on /debug/requests and /debug/events. + +A trace.Trace provides tracing for short-lived objects, usually requests. +A request handler might be implemented like this: + + func fooHandler(w http.ResponseWriter, req *http.Request) { + tr := trace.New("mypkg.Foo", req.URL.Path) + defer tr.Finish() + ... + tr.LazyPrintf("some event %q happened", str) + ... + if err := somethingImportant(); err != nil { + tr.LazyPrintf("somethingImportant failed: %v", err) + tr.SetError() + } + } + +The /debug/requests HTTP endpoint organizes the traces by family, +errors, and duration. It also provides histogram of request duration +for each family. + +A trace.EventLog provides tracing for long-lived objects, such as RPC +connections. + + // A Fetcher fetches URL paths for a single domain. + type Fetcher struct { + domain string + events trace.EventLog + } + + func NewFetcher(domain string) *Fetcher { + return &Fetcher{ + domain, + trace.NewEventLog("mypkg.Fetcher", domain), + } + } + + func (f *Fetcher) Fetch(path string) (string, error) { + resp, err := http.Get("http://" + f.domain + "/" + path) + if err != nil { + f.events.Errorf("Get(%q) = %v", path, err) + return "", err + } + f.events.Printf("Get(%q) = %s", path, resp.Status) + ... + } + + func (f *Fetcher) Close() error { + f.events.Finish() + return nil + } + +The /debug/events HTTP endpoint organizes the event logs by family and +by time since the last error. The expanded view displays recent log +entries and the log's call stack. +*/ +package trace // import "golang.org/x/net/trace" + +import ( + "bytes" + "fmt" + "html/template" + "io" + "log" + "net" + "net/http" + "runtime" + "sort" + "strconv" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/internal/timeseries" +) + +// DebugUseAfterFinish controls whether to debug uses of Trace values after finishing. +// FOR DEBUGGING ONLY. This will slow down the program. +var DebugUseAfterFinish = false + +// AuthRequest determines whether a specific request is permitted to load the +// /debug/requests or /debug/events pages. +// +// It returns two bools; the first indicates whether the page may be viewed at all, +// and the second indicates whether sensitive events will be shown. +// +// AuthRequest may be replaced by a program to customize its authorization requirements. +// +// The default AuthRequest function returns (true, true) if and only if the request +// comes from localhost/127.0.0.1/[::1]. +var AuthRequest = func(req *http.Request) (any, sensitive bool) { + // RemoteAddr is commonly in the form "IP" or "IP:port". + // If it is in the form "IP:port", split off the port. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err != nil { + host = req.RemoteAddr + } + switch host { + case "localhost", "127.0.0.1", "::1": + return true, true + default: + return false, false + } +} + +func init() { + // TODO(jbd): Serve Traces from /debug/traces in the future? + // There is no requirement for a request to be present to have traces. 
+ http.HandleFunc("/debug/requests", Traces) + http.HandleFunc("/debug/events", Events) +} + +// Traces responds with traces from the program. +// The package initialization registers it in http.DefaultServeMux +// at /debug/requests. +// +// It performs authorization by running AuthRequest. +func Traces(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + Render(w, req, sensitive) +} + +// Events responds with a page of events collected by EventLogs. +// The package initialization registers it in http.DefaultServeMux +// at /debug/events. +// +// It performs authorization by running AuthRequest. +func Events(w http.ResponseWriter, req *http.Request) { + any, sensitive := AuthRequest(req) + if !any { + http.Error(w, "not allowed", http.StatusUnauthorized) + return + } + w.Header().Set("Content-Type", "text/html; charset=utf-8") + RenderEvents(w, req, sensitive) +} + +// Render renders the HTML page typically served at /debug/requests. +// It does not do any auth checking. The request may be nil. +// +// Most users will use the Traces handler. +func Render(w io.Writer, req *http.Request, sensitive bool) { + data := &struct { + Families []string + ActiveTraceCount map[string]int + CompletedTraces map[string]*family + + // Set when a bucket has been selected. + Traces traceList + Family string + Bucket int + Expanded bool + Traced bool + Active bool + ShowSensitive bool // whether to show sensitive events + + Histogram template.HTML + HistogramWindow string // e.g. "last minute", "last hour", "all time" + + // If non-zero, the set of traces is a partial set, + // and this is the total number. + Total int + }{ + CompletedTraces: completedTraces, + } + + data.ShowSensitive = sensitive + if req != nil { + // Allow show_sensitive=0 to force hiding of sensitive data for testing. + // This only goes one way; you can't use show_sensitive=1 to see things. + if req.FormValue("show_sensitive") == "0" { + data.ShowSensitive = false + } + + if exp, err := strconv.ParseBool(req.FormValue("exp")); err == nil { + data.Expanded = exp + } + if exp, err := strconv.ParseBool(req.FormValue("rtraced")); err == nil { + data.Traced = exp + } + } + + completedMu.RLock() + data.Families = make([]string, 0, len(completedTraces)) + for fam := range completedTraces { + data.Families = append(data.Families, fam) + } + completedMu.RUnlock() + sort.Strings(data.Families) + + // We are careful here to minimize the time spent locking activeMu, + // since that lock is required every time an RPC starts and finishes. 
+ data.ActiveTraceCount = make(map[string]int, len(data.Families)) + activeMu.RLock() + for fam, s := range activeTraces { + data.ActiveTraceCount[fam] = s.Len() + } + activeMu.RUnlock() + + var ok bool + data.Family, data.Bucket, ok = parseArgs(req) + switch { + case !ok: + // No-op + case data.Bucket == -1: + data.Active = true + n := data.ActiveTraceCount[data.Family] + data.Traces = getActiveTraces(data.Family) + if len(data.Traces) < n { + data.Total = n + } + case data.Bucket < bucketsPerFamily: + if b := lookupBucket(data.Family, data.Bucket); b != nil { + data.Traces = b.Copy(data.Traced) + } + default: + if f := getFamily(data.Family, false); f != nil { + var obs timeseries.Observable + f.LatencyMu.RLock() + switch o := data.Bucket - bucketsPerFamily; o { + case 0: + obs = f.Latency.Minute() + data.HistogramWindow = "last minute" + case 1: + obs = f.Latency.Hour() + data.HistogramWindow = "last hour" + case 2: + obs = f.Latency.Total() + data.HistogramWindow = "all time" + } + f.LatencyMu.RUnlock() + if obs != nil { + data.Histogram = obs.(*histogram).html() + } + } + } + + if data.Traces != nil { + defer data.Traces.Free() + sort.Sort(data.Traces) + } + + completedMu.RLock() + defer completedMu.RUnlock() + if err := pageTmpl().ExecuteTemplate(w, "Page", data); err != nil { + log.Printf("net/trace: Failed executing template: %v", err) + } +} + +func parseArgs(req *http.Request) (fam string, b int, ok bool) { + if req == nil { + return "", 0, false + } + fam, bStr := req.FormValue("fam"), req.FormValue("b") + if fam == "" || bStr == "" { + return "", 0, false + } + b, err := strconv.Atoi(bStr) + if err != nil || b < -1 { + return "", 0, false + } + + return fam, b, true +} + +func lookupBucket(fam string, b int) *traceBucket { + f := getFamily(fam, false) + if f == nil || b < 0 || b >= len(f.Buckets) { + return nil + } + return f.Buckets[b] +} + +type contextKeyT string + +var contextKey = contextKeyT("golang.org/x/net/trace.Trace") + +// Trace represents an active request. +type Trace interface { + // LazyLog adds x to the event log. It will be evaluated each time the + // /debug/requests page is rendered. Any memory referenced by x will be + // pinned until the trace is finished and later discarded. + LazyLog(x fmt.Stringer, sensitive bool) + + // LazyPrintf evaluates its arguments with fmt.Sprintf each time the + // /debug/requests page is rendered. Any memory referenced by a will be + // pinned until the trace is finished and later discarded. + LazyPrintf(format string, a ...interface{}) + + // SetError declares that this trace resulted in an error. + SetError() + + // SetRecycler sets a recycler for the trace. + // f will be called for each event passed to LazyLog at a time when + // it is no longer required, whether while the trace is still active + // and the event is discarded, or when a completed trace is discarded. + SetRecycler(f func(interface{})) + + // SetTraceInfo sets the trace info for the trace. + // This is currently unused. + SetTraceInfo(traceID, spanID uint64) + + // SetMaxEvents sets the maximum number of events that will be stored + // in the trace. This has no effect if any events have already been + // added to the trace. + SetMaxEvents(m int) + + // Finish declares that this trace is complete. + // The trace should not be used after calling this method. + Finish() +} + +type lazySprintf struct { + format string + a []interface{} +} + +func (l *lazySprintf) String() string { + return fmt.Sprintf(l.format, l.a...) 
+} + +// New returns a new Trace with the specified family and title. +func New(family, title string) Trace { + tr := newTrace() + tr.ref() + tr.Family, tr.Title = family, title + tr.Start = time.Now() + tr.maxEvents = maxEventsPerTrace + tr.events = tr.eventsBuf[:0] + + activeMu.RLock() + s := activeTraces[tr.Family] + activeMu.RUnlock() + if s == nil { + activeMu.Lock() + s = activeTraces[tr.Family] // check again + if s == nil { + s = new(traceSet) + activeTraces[tr.Family] = s + } + activeMu.Unlock() + } + s.Add(tr) + + // Trigger allocation of the completed trace structure for this family. + // This will cause the family to be present in the request page during + // the first trace of this family. We don't care about the return value, + // nor is there any need for this to run inline, so we execute it in its + // own goroutine, but only if the family isn't allocated yet. + completedMu.RLock() + if _, ok := completedTraces[tr.Family]; !ok { + go allocFamily(tr.Family) + } + completedMu.RUnlock() + + return tr +} + +func (tr *trace) Finish() { + elapsed := time.Now().Sub(tr.Start) + tr.mu.Lock() + tr.Elapsed = elapsed + tr.mu.Unlock() + + if DebugUseAfterFinish { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + tr.finishStack = buf[:n] + } + + activeMu.RLock() + m := activeTraces[tr.Family] + activeMu.RUnlock() + m.Remove(tr) + + f := getFamily(tr.Family, true) + tr.mu.RLock() // protects tr fields in Cond.match calls + for _, b := range f.Buckets { + if b.Cond.match(tr) { + b.Add(tr) + } + } + tr.mu.RUnlock() + + // Add a sample of elapsed time as microseconds to the family's timeseries + h := new(histogram) + h.addMeasurement(elapsed.Nanoseconds() / 1e3) + f.LatencyMu.Lock() + f.Latency.Add(h) + f.LatencyMu.Unlock() + + tr.unref() // matches ref in New +} + +const ( + bucketsPerFamily = 9 + tracesPerBucket = 10 + maxActiveTraces = 20 // Maximum number of active traces to show. + maxEventsPerTrace = 10 + numHistogramBuckets = 38 +) + +var ( + // The active traces. + activeMu sync.RWMutex + activeTraces = make(map[string]*traceSet) // family -> traces + + // Families of completed traces. + completedMu sync.RWMutex + completedTraces = make(map[string]*family) // family -> traces +) + +type traceSet struct { + mu sync.RWMutex + m map[*trace]bool + + // We could avoid the entire map scan in FirstN by having a slice of all the traces + // ordered by start time, and an index into that from the trace struct, with a periodic + // repack of the slice after enough traces finish; we could also use a skip list or similar. + // However, that would shift some of the expense from /debug/requests time to RPC time, + // which is probably the wrong trade-off. +} + +func (ts *traceSet) Len() int { + ts.mu.RLock() + defer ts.mu.RUnlock() + return len(ts.m) +} + +func (ts *traceSet) Add(tr *trace) { + ts.mu.Lock() + if ts.m == nil { + ts.m = make(map[*trace]bool) + } + ts.m[tr] = true + ts.mu.Unlock() +} + +func (ts *traceSet) Remove(tr *trace) { + ts.mu.Lock() + delete(ts.m, tr) + ts.mu.Unlock() +} + +// FirstN returns the first n traces ordered by time. +func (ts *traceSet) FirstN(n int) traceList { + ts.mu.RLock() + defer ts.mu.RUnlock() + + if n > len(ts.m) { + n = len(ts.m) + } + trl := make(traceList, 0, n) + + // Fast path for when no selectivity is needed. + if n == len(ts.m) { + for tr := range ts.m { + tr.ref() + trl = append(trl, tr) + } + sort.Sort(trl) + return trl + } + + // Pick the oldest n traces. + // This is inefficient. 
See the comment in the traceSet struct. + for tr := range ts.m { + // Put the first n traces into trl in the order they occur. + // When we have n, sort trl, and thereafter maintain its order. + if len(trl) < n { + tr.ref() + trl = append(trl, tr) + if len(trl) == n { + // This is guaranteed to happen exactly once during this loop. + sort.Sort(trl) + } + continue + } + if tr.Start.After(trl[n-1].Start) { + continue + } + + // Find where to insert this one. + tr.ref() + i := sort.Search(n, func(i int) bool { return trl[i].Start.After(tr.Start) }) + trl[n-1].unref() + copy(trl[i+1:], trl[i:]) + trl[i] = tr + } + + return trl +} + +func getActiveTraces(fam string) traceList { + activeMu.RLock() + s := activeTraces[fam] + activeMu.RUnlock() + if s == nil { + return nil + } + return s.FirstN(maxActiveTraces) +} + +func getFamily(fam string, allocNew bool) *family { + completedMu.RLock() + f := completedTraces[fam] + completedMu.RUnlock() + if f == nil && allocNew { + f = allocFamily(fam) + } + return f +} + +func allocFamily(fam string) *family { + completedMu.Lock() + defer completedMu.Unlock() + f := completedTraces[fam] + if f == nil { + f = newFamily() + completedTraces[fam] = f + } + return f +} + +// family represents a set of trace buckets and associated latency information. +type family struct { + // traces may occur in multiple buckets. + Buckets [bucketsPerFamily]*traceBucket + + // latency time series + LatencyMu sync.RWMutex + Latency *timeseries.MinuteHourSeries +} + +func newFamily() *family { + return &family{ + Buckets: [bucketsPerFamily]*traceBucket{ + {Cond: minCond(0)}, + {Cond: minCond(50 * time.Millisecond)}, + {Cond: minCond(100 * time.Millisecond)}, + {Cond: minCond(200 * time.Millisecond)}, + {Cond: minCond(500 * time.Millisecond)}, + {Cond: minCond(1 * time.Second)}, + {Cond: minCond(10 * time.Second)}, + {Cond: minCond(100 * time.Second)}, + {Cond: errorCond{}}, + }, + Latency: timeseries.NewMinuteHourSeries(func() timeseries.Observable { return new(histogram) }), + } +} + +// traceBucket represents a size-capped bucket of historic traces, +// along with a condition for a trace to belong to the bucket. +type traceBucket struct { + Cond cond + + // Ring buffer implementation of a fixed-size FIFO queue. + mu sync.RWMutex + buf [tracesPerBucket]*trace + start int // < tracesPerBucket + length int // <= tracesPerBucket +} + +func (b *traceBucket) Add(tr *trace) { + b.mu.Lock() + defer b.mu.Unlock() + + i := b.start + b.length + if i >= tracesPerBucket { + i -= tracesPerBucket + } + if b.length == tracesPerBucket { + // "Remove" an element from the bucket. + b.buf[i].unref() + b.start++ + if b.start == tracesPerBucket { + b.start = 0 + } + } + b.buf[i] = tr + if b.length < tracesPerBucket { + b.length++ + } + tr.ref() +} + +// Copy returns a copy of the traces in the bucket. +// If tracedOnly is true, only the traces with trace information will be returned. +// The logs will be ref'd before returning; the caller should call +// the Free method when it is done with them. +// TODO(dsymonds): keep track of traced requests in separate buckets. 
+func (b *traceBucket) Copy(tracedOnly bool) traceList { + b.mu.RLock() + defer b.mu.RUnlock() + + trl := make(traceList, 0, b.length) + for i, x := 0, b.start; i < b.length; i++ { + tr := b.buf[x] + if !tracedOnly || tr.spanID != 0 { + tr.ref() + trl = append(trl, tr) + } + x++ + if x == b.length { + x = 0 + } + } + return trl +} + +func (b *traceBucket) Empty() bool { + b.mu.RLock() + defer b.mu.RUnlock() + return b.length == 0 +} + +// cond represents a condition on a trace. +type cond interface { + match(t *trace) bool + String() string +} + +type minCond time.Duration + +func (m minCond) match(t *trace) bool { return t.Elapsed >= time.Duration(m) } +func (m minCond) String() string { return fmt.Sprintf("≥%gs", time.Duration(m).Seconds()) } + +type errorCond struct{} + +func (e errorCond) match(t *trace) bool { return t.IsError } +func (e errorCond) String() string { return "errors" } + +type traceList []*trace + +// Free calls unref on each element of the list. +func (trl traceList) Free() { + for _, t := range trl { + t.unref() + } +} + +// traceList may be sorted in reverse chronological order. +func (trl traceList) Len() int { return len(trl) } +func (trl traceList) Less(i, j int) bool { return trl[i].Start.After(trl[j].Start) } +func (trl traceList) Swap(i, j int) { trl[i], trl[j] = trl[j], trl[i] } + +// An event is a timestamped log entry in a trace. +type event struct { + When time.Time + Elapsed time.Duration // since previous event in trace + NewDay bool // whether this event is on a different day to the previous event + Recyclable bool // whether this event was passed via LazyLog + Sensitive bool // whether this event contains sensitive information + What interface{} // string or fmt.Stringer +} + +// WhenString returns a string representation of the elapsed time of the event. +// It will include the date if midnight was crossed. +func (e event) WhenString() string { + if e.NewDay { + return e.When.Format("2006/01/02 15:04:05.000000") + } + return e.When.Format("15:04:05.000000") +} + +// discarded represents a number of discarded events. +// It is stored as *discarded to make it easier to update in-place. +type discarded int + +func (d *discarded) String() string { + return fmt.Sprintf("(%d events discarded)", int(*d)) +} + +// trace represents an active or complete request, +// either sent or received by this program. +type trace struct { + // Family is the top-level grouping of traces to which this belongs. + Family string + + // Title is the title of this trace. + Title string + + // Start time of the this trace. + Start time.Time + + mu sync.RWMutex + events []event // Append-only sequence of events (modulo discards). + maxEvents int + recycler func(interface{}) + IsError bool // Whether this trace resulted in an error. + Elapsed time.Duration // Elapsed time for this trace, zero while active. + traceID uint64 // Trace information if non-zero. + spanID uint64 + + refs int32 // how many buckets this is in + disc discarded // scratch space to avoid allocation + + finishStack []byte // where finish was called, if DebugUseAfterFinish is set + + eventsBuf [4]event // preallocated buffer in case we only log a few events +} + +func (tr *trace) reset() { + // Clear all but the mutex. Mutexes may not be copied, even when unlocked. 
+ tr.Family = "" + tr.Title = "" + tr.Start = time.Time{} + + tr.mu.Lock() + tr.Elapsed = 0 + tr.traceID = 0 + tr.spanID = 0 + tr.IsError = false + tr.maxEvents = 0 + tr.events = nil + tr.recycler = nil + tr.mu.Unlock() + + tr.refs = 0 + tr.disc = 0 + tr.finishStack = nil + for i := range tr.eventsBuf { + tr.eventsBuf[i] = event{} + } +} + +// delta returns the elapsed time since the last event or the trace start, +// and whether it spans midnight. +// L >= tr.mu +func (tr *trace) delta(t time.Time) (time.Duration, bool) { + if len(tr.events) == 0 { + return t.Sub(tr.Start), false + } + prev := tr.events[len(tr.events)-1].When + return t.Sub(prev), prev.Day() != t.Day() +} + +func (tr *trace) addEvent(x interface{}, recyclable, sensitive bool) { + if DebugUseAfterFinish && tr.finishStack != nil { + buf := make([]byte, 4<<10) // 4 KB should be enough + n := runtime.Stack(buf, false) + log.Printf("net/trace: trace used after finish:\nFinished at:\n%s\nUsed at:\n%s", tr.finishStack, buf[:n]) + } + + /* + NOTE TO DEBUGGERS + + If you are here because your program panicked in this code, + it is almost definitely the fault of code using this package, + and very unlikely to be the fault of this code. + + The most likely scenario is that some code elsewhere is using + a trace.Trace after its Finish method is called. + You can temporarily set the DebugUseAfterFinish var + to help discover where that is; do not leave that var set, + since it makes this package much less efficient. + */ + + e := event{When: time.Now(), What: x, Recyclable: recyclable, Sensitive: sensitive} + tr.mu.Lock() + e.Elapsed, e.NewDay = tr.delta(e.When) + if len(tr.events) < tr.maxEvents { + tr.events = append(tr.events, e) + } else { + // Discard the middle events. + di := int((tr.maxEvents - 1) / 2) + if d, ok := tr.events[di].What.(*discarded); ok { + (*d)++ + } else { + // disc starts at two to count for the event it is replacing, + // plus the next one that we are about to drop. + tr.disc = 2 + if tr.recycler != nil && tr.events[di].Recyclable { + go tr.recycler(tr.events[di].What) + } + tr.events[di].What = &tr.disc + } + // The timestamp of the discarded meta-event should be + // the time of the last event it is representing. + tr.events[di].When = tr.events[di+1].When + + if tr.recycler != nil && tr.events[di+1].Recyclable { + go tr.recycler(tr.events[di+1].What) + } + copy(tr.events[di+1:], tr.events[di+2:]) + tr.events[tr.maxEvents-1] = e + } + tr.mu.Unlock() +} + +func (tr *trace) LazyLog(x fmt.Stringer, sensitive bool) { + tr.addEvent(x, true, sensitive) +} + +func (tr *trace) LazyPrintf(format string, a ...interface{}) { + tr.addEvent(&lazySprintf{format, a}, false, false) +} + +func (tr *trace) SetError() { + tr.mu.Lock() + tr.IsError = true + tr.mu.Unlock() +} + +func (tr *trace) SetRecycler(f func(interface{})) { + tr.mu.Lock() + tr.recycler = f + tr.mu.Unlock() +} + +func (tr *trace) SetTraceInfo(traceID, spanID uint64) { + tr.mu.Lock() + tr.traceID, tr.spanID = traceID, spanID + tr.mu.Unlock() +} + +func (tr *trace) SetMaxEvents(m int) { + tr.mu.Lock() + // Always keep at least three events: first, discarded count, last. + if len(tr.events) == 0 && m > 3 { + tr.maxEvents = m + } + tr.mu.Unlock() +} + +func (tr *trace) ref() { + atomic.AddInt32(&tr.refs, 1) +} + +func (tr *trace) unref() { + if atomic.AddInt32(&tr.refs, -1) == 0 { + tr.mu.RLock() + if tr.recycler != nil { + // freeTrace clears tr, so we hold tr.recycler and tr.events here. 
+ go func(f func(interface{}), es []event) { + for _, e := range es { + if e.Recyclable { + f(e.What) + } + } + }(tr.recycler, tr.events) + } + tr.mu.RUnlock() + + freeTrace(tr) + } +} + +func (tr *trace) When() string { + return tr.Start.Format("2006/01/02 15:04:05.000000") +} + +func (tr *trace) ElapsedTime() string { + tr.mu.RLock() + t := tr.Elapsed + tr.mu.RUnlock() + + if t == 0 { + // Active trace. + t = time.Since(tr.Start) + } + return fmt.Sprintf("%.6f", t.Seconds()) +} + +func (tr *trace) Events() []event { + tr.mu.RLock() + defer tr.mu.RUnlock() + return tr.events +} + +var traceFreeList = make(chan *trace, 1000) // TODO(dsymonds): Use sync.Pool? + +// newTrace returns a trace ready to use. +func newTrace() *trace { + select { + case tr := <-traceFreeList: + return tr + default: + return new(trace) + } +} + +// freeTrace adds tr to traceFreeList if there's room. +// This is non-blocking. +func freeTrace(tr *trace) { + if DebugUseAfterFinish { + return // never reuse + } + tr.reset() + select { + case traceFreeList <- tr: + default: + } +} + +func elapsed(d time.Duration) string { + b := []byte(fmt.Sprintf("%.6f", d.Seconds())) + + // For subsecond durations, blank all zeros before decimal point, + // and all zeros between the decimal point and the first non-zero digit. + if d < time.Second { + dot := bytes.IndexByte(b, '.') + for i := 0; i < dot; i++ { + b[i] = ' ' + } + for i := dot + 1; i < len(b); i++ { + if b[i] == '0' { + b[i] = ' ' + } else { + break + } + } + } + + return string(b) +} + +var pageTmplCache *template.Template +var pageTmplOnce sync.Once + +func pageTmpl() *template.Template { + pageTmplOnce.Do(func() { + pageTmplCache = template.Must(template.New("Page").Funcs(template.FuncMap{ + "elapsed": elapsed, + "add": func(a, b int) int { return a + b }, + }).Parse(pageHTML)) + }) + return pageTmplCache +} + +const pageHTML = ` +{{template "Prolog" .}} +{{template "StatusTable" .}} +{{template "Epilog" .}} + +{{define "Prolog"}} + + + /debug/requests + + + + +

/debug/requests

+{{end}} {{/* end of Prolog */}} + +{{define "StatusTable"}} + + {{range $fam := .Families}} + + + + {{$n := index $.ActiveTraceCount $fam}} + + + {{$f := index $.CompletedTraces $fam}} + {{range $i, $b := $f.Buckets}} + {{$empty := $b.Empty}} + + {{end}} + + {{$nb := len $f.Buckets}} + + + + + + {{end}} +
{{$fam}} + {{if $n}}{{end}} + [{{$n}} active] + {{if $n}}{{end}} + + {{if not $empty}}{{end}} + [{{.Cond}}] + {{if not $empty}}{{end}} + + [minute] + + [hour] + + [total] +
+{{end}} {{/* end of StatusTable */}} + +{{define "Epilog"}} +{{if $.Traces}} +
+

Family: {{$.Family}}

+ +{{if or $.Expanded $.Traced}} + [Normal/Summary] +{{else}} + [Normal/Summary] +{{end}} + +{{if or (not $.Expanded) $.Traced}} + [Normal/Expanded] +{{else}} + [Normal/Expanded] +{{end}} + +{{if not $.Active}} + {{if or $.Expanded (not $.Traced)}} + [Traced/Summary] + {{else}} + [Traced/Summary] + {{end}} + {{if or (not $.Expanded) (not $.Traced)}} + [Traced/Expanded] + {{else}} + [Traced/Expanded] + {{end}} +{{end}} + +{{if $.Total}} +

Showing {{len $.Traces}} of {{$.Total}} traces.

+{{end}} + + + + + {{range $tr := $.Traces}} + + + + + {{/* TODO: include traceID/spanID */}} + + {{if $.Expanded}} + {{range $tr.Events}} + + + + + + {{end}} + {{end}} + {{end}} +
+ {{if $.Active}}Active{{else}}Completed{{end}} Requests +
WhenElapsed (s)
{{$tr.When}}{{$tr.ElapsedTime}}{{$tr.Title}}
{{.WhenString}}{{elapsed .Elapsed}}{{if or $.ShowSensitive (not .Sensitive)}}... {{.What}}{{else}}[redacted]{{end}}
+{{end}} {{/* if $.Traces */}} + +{{if $.Histogram}} +

Latency (µs) of {{$.Family}} over {{$.HistogramWindow}}

+{{$.Histogram}} +{{end}} {{/* if $.Histogram */}} + + + +{{end}} {{/* end of Epilog */}} +` diff --git a/vendor/golang.org/x/net/trace/trace_go16.go b/vendor/golang.org/x/net/trace/trace_go16.go new file mode 100644 index 00000000000..d6081911853 --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go16.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package trace + +import "golang.org/x/net/context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/net/trace/trace_go17.go b/vendor/golang.org/x/net/trace/trace_go17.go new file mode 100644 index 00000000000..df6e1fba7ca --- /dev/null +++ b/vendor/golang.org/x/net/trace/trace_go17.go @@ -0,0 +1,21 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package trace + +import "context" + +// NewContext returns a copy of the parent context +// and associates it with a Trace. +func NewContext(ctx context.Context, tr Trace) context.Context { + return context.WithValue(ctx, contextKey, tr) +} + +// FromContext returns the Trace bound to the context, if any. +func FromContext(ctx context.Context) (tr Trace, ok bool) { + tr, ok = ctx.Value(contextKey).(Trace) + return +} diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md index 46aa2b12dda..dfbed62cf54 100644 --- a/vendor/golang.org/x/oauth2/CONTRIBUTING.md +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -4,16 +4,15 @@ Go is an open source project. It is the work of hundreds of contributors. We appreciate your help! - ## Filing issues When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: -1. What version of Go are you using (`go version`)? -2. What operating system and processor architecture are you using? -3. What did you do? -4. What did you expect to see? -5. What did you see instead? +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. The gophers there will answer or ask you to file an issue if you've tripped over a bug. @@ -23,9 +22,5 @@ The gophers there will answer or ask you to file an issue if you've tripped over Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) before sending patches. -**We do not accept GitHub pull requests** -(we use [Gerrit](https://code.google.com/p/gerrit/) instead for code review). - Unless otherwise noted, the Go source files are distributed under the BSD-style license found in the LICENSE file. 
- diff --git a/vendor/golang.org/x/oauth2/google/default.go b/vendor/golang.org/x/oauth2/google/default.go index b4b62745c45..a31607437d3 100644 --- a/vendor/golang.org/x/oauth2/google/default.go +++ b/vendor/golang.org/x/oauth2/google/default.go @@ -18,20 +18,6 @@ import ( "golang.org/x/oauth2" ) -// DefaultCredentials holds "Application Default Credentials". -// For more details, see: -// https://developers.google.com/accounts/docs/application-default-credentials -type DefaultCredentials struct { - ProjectID string // may be empty - TokenSource oauth2.TokenSource - - // JSON contains the raw bytes from a JSON credentials file. - // This field may be nil if authentication is provided by the - // environment and not with a credentials file, e.g. when code is - // running on Google Cloud Platform. - JSON []byte -} - // DefaultClient returns an HTTP Client that uses the // DefaultTokenSource to obtain authentication credentials. func DefaultClient(ctx context.Context, scope ...string) (*http.Client, error) { @@ -53,25 +39,12 @@ func DefaultTokenSource(ctx context.Context, scope ...string) (oauth2.TokenSourc return creds.TokenSource, nil } -// FindDefaultCredentials searches for "Application Default Credentials". -// -// It looks for credentials in the following places, -// preferring the first location found: -// -// 1. A JSON file whose path is specified by the -// GOOGLE_APPLICATION_CREDENTIALS environment variable. -// 2. A JSON file in a location known to the gcloud command-line tool. -// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. -// On other systems, $HOME/.config/gcloud/application_default_credentials.json. -// 3. On Google App Engine it uses the appengine.AccessToken function. -// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches -// credentials from the metadata server. -// (In this final case any provided scopes are ignored.) -func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCredentials, error) { +// Common implementation for FindDefaultCredentials. +func findDefaultCredentials(ctx context.Context, scopes []string) (*DefaultCredentials, error) { // First, try the environment variable. const envVar = "GOOGLE_APPLICATION_CREDENTIALS" if filename := os.Getenv(envVar); filename != "" { - creds, err := readCredentialsFile(ctx, filename, scope) + creds, err := readCredentialsFile(ctx, filename, scopes) if err != nil { return nil, fmt.Errorf("google: error getting credentials using %v environment variable: %v", envVar, err) } @@ -80,7 +53,7 @@ func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCrede // Second, try a well-known file. filename := wellKnownFile() - if creds, err := readCredentialsFile(ctx, filename, scope); err == nil { + if creds, err := readCredentialsFile(ctx, filename, scopes); err == nil { return creds, nil } else if !os.IsNotExist(err) { return nil, fmt.Errorf("google: error getting credentials using well-known file (%v): %v", filename, err) @@ -90,7 +63,7 @@ func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCrede if appengineTokenFunc != nil && !appengineFlex { return &DefaultCredentials{ ProjectID: appengineAppIDFunc(ctx), - TokenSource: AppEngineTokenSource(ctx, scope...), + TokenSource: AppEngineTokenSource(ctx, scopes...), }, nil } @@ -108,6 +81,23 @@ func FindDefaultCredentials(ctx context.Context, scope ...string) (*DefaultCrede return nil, fmt.Errorf("google: could not find default credentials. 
See %v for more information.", url) } +// Common implementation for CredentialsFromJSON. +func credentialsFromJSON(ctx context.Context, jsonData []byte, scopes []string) (*DefaultCredentials, error) { + var f credentialsFile + if err := json.Unmarshal(jsonData, &f); err != nil { + return nil, err + } + ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) + if err != nil { + return nil, err + } + return &DefaultCredentials{ + ProjectID: f.ProjectID, + TokenSource: ts, + JSON: jsonData, + }, nil +} + func wellKnownFile() string { const f = "application_default_credentials.json" if runtime.GOOS == "windows" { @@ -121,17 +111,5 @@ func readCredentialsFile(ctx context.Context, filename string, scopes []string) if err != nil { return nil, err } - var f credentialsFile - if err := json.Unmarshal(b, &f); err != nil { - return nil, err - } - ts, err := f.tokenSource(ctx, append([]string(nil), scopes...)) - if err != nil { - return nil, err - } - return &DefaultCredentials{ - ProjectID: f.ProjectID, - TokenSource: ts, - JSON: b, - }, nil + return CredentialsFromJSON(ctx, b, scopes...) } diff --git a/vendor/golang.org/x/oauth2/google/doc_go19.go b/vendor/golang.org/x/oauth2/google/doc_go19.go new file mode 100644 index 00000000000..2a86325fe3b --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc_go19.go @@ -0,0 +1,42 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The Credentials type represents Google credentials, including Application Default +// Credentials. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON formats +// described in OAuth2 Configs, above. The TokenSource in the returned value is the +// same as the one obtained from the oauth2.Config returned from ConfigFromJSON or +// JWTConfigFromJSON, but the Credentials may contain additional information +// that is useful is some circumstances. 
+package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/doc_not_go19.go b/vendor/golang.org/x/oauth2/google/doc_not_go19.go new file mode 100644 index 00000000000..5c3c6e14812 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/doc_not_go19.go @@ -0,0 +1,43 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +// Package google provides support for making OAuth2 authorized and authenticated +// HTTP requests to Google APIs. It supports the Web server flow, client-side +// credentials, service accounts, Google Compute Engine service accounts, and Google +// App Engine service accounts. +// +// A brief overview of the package follows. For more information, please read +// https://developers.google.com/accounts/docs/OAuth2 +// and +// https://developers.google.com/accounts/docs/application-default-credentials. +// +// OAuth2 Configs +// +// Two functions in this package return golang.org/x/oauth2.Config values from Google credential +// data. Google supports two JSON formats for OAuth2 credentials: one is handled by ConfigFromJSON, +// the other by JWTConfigFromJSON. The returned Config can be used to obtain a TokenSource or +// create an http.Client. +// +// +// Credentials +// +// The DefaultCredentials type represents Google Application Default Credentials, as +// well as other forms of credential. +// +// Use FindDefaultCredentials to obtain Application Default Credentials. +// FindDefaultCredentials looks in some well-known places for a credentials file, and +// will call AppEngineTokenSource or ComputeTokenSource as needed. +// +// DefaultClient and DefaultTokenSource are convenience methods. They first call FindDefaultCredentials, +// then use the credentials to construct an http.Client or an oauth2.TokenSource. +// +// Use CredentialsFromJSON to obtain credentials from either of the two JSON +// formats described in OAuth2 Configs, above. (The DefaultCredentials returned may +// not be "Application Default Credentials".) The TokenSource in the returned value +// is the same as the one obtained from the oauth2.Config returned from +// ConfigFromJSON or JWTConfigFromJSON, but the DefaultCredentials may contain +// additional information that is useful is some circumstances. +package google // import "golang.org/x/oauth2/google" diff --git a/vendor/golang.org/x/oauth2/google/go19.go b/vendor/golang.org/x/oauth2/google/go19.go new file mode 100644 index 00000000000..4d0318b1e16 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/go19.go @@ -0,0 +1,57 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package google + +import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// Credentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type Credentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// DefaultCredentials is the old name of Credentials. 
+// +// Deprecated: use Credentials instead. +type DefaultCredentials = Credentials + +// FindDefaultCredentials searches for "Application Default Credentials". +// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*Credentials, error) { + return findDefaultCredentials(ctx, scopes) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*Credentials, error) { + return credentialsFromJSON(ctx, jsonData, scopes) +} diff --git a/vendor/golang.org/x/oauth2/google/google.go b/vendor/golang.org/x/oauth2/google/google.go index 66a8b0e1812..f7481fbcc63 100644 --- a/vendor/golang.org/x/oauth2/google/google.go +++ b/vendor/golang.org/x/oauth2/google/google.go @@ -2,17 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package google provides support for making OAuth2 authorized and -// authenticated HTTP requests to Google APIs. -// It supports the Web server flow, client-side credentials, service accounts, -// Google Compute Engine service accounts, and Google App Engine service -// accounts. -// -// For more information, please read -// https://developers.google.com/accounts/docs/OAuth2 -// and -// https://developers.google.com/accounts/docs/application-default-credentials. -package google // import "golang.org/x/oauth2/google" +package google import ( "encoding/json" diff --git a/vendor/golang.org/x/oauth2/google/not_go19.go b/vendor/golang.org/x/oauth2/google/not_go19.go new file mode 100644 index 00000000000..544e40624e1 --- /dev/null +++ b/vendor/golang.org/x/oauth2/google/not_go19.go @@ -0,0 +1,54 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package google + +import ( + "golang.org/x/net/context" + "golang.org/x/oauth2" +) + +// DefaultCredentials holds Google credentials, including "Application Default Credentials". +// For more details, see: +// https://developers.google.com/accounts/docs/application-default-credentials +type DefaultCredentials struct { + ProjectID string // may be empty + TokenSource oauth2.TokenSource + + // JSON contains the raw bytes from a JSON credentials file. + // This field may be nil if authentication is provided by the + // environment and not with a credentials file, e.g. when code is + // running on Google Cloud Platform. + JSON []byte +} + +// FindDefaultCredentials searches for "Application Default Credentials". 
+// +// It looks for credentials in the following places, +// preferring the first location found: +// +// 1. A JSON file whose path is specified by the +// GOOGLE_APPLICATION_CREDENTIALS environment variable. +// 2. A JSON file in a location known to the gcloud command-line tool. +// On Windows, this is %APPDATA%/gcloud/application_default_credentials.json. +// On other systems, $HOME/.config/gcloud/application_default_credentials.json. +// 3. On Google App Engine it uses the appengine.AccessToken function. +// 4. On Google Compute Engine and Google App Engine Managed VMs, it fetches +// credentials from the metadata server. +// (In this final case any provided scopes are ignored.) +func FindDefaultCredentials(ctx context.Context, scopes ...string) (*DefaultCredentials, error) { + return findDefaultCredentials(ctx, scopes) +} + +// CredentialsFromJSON obtains Google credentials from a JSON value. The JSON can +// represent either a Google Developers Console client_credentials.json file (as in +// ConfigFromJSON) or a Google Developers service account key file (as in +// JWTConfigFromJSON). +// +// Note: despite the name, the returned credentials may not be Application Default Credentials. +func CredentialsFromJSON(ctx context.Context, jsonData []byte, scopes ...string) (*DefaultCredentials, error) { + return credentialsFromJSON(ctx, jsonData, scopes) +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go index ce3f27e0285..30fb315d139 100644 --- a/vendor/golang.org/x/oauth2/internal/token.go +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -126,6 +126,8 @@ var brokenAuthHeaderProviders = []string{ "https://api.sipgate.com/v1/authorization/oauth", "https://api.medium.com/v1/tokens", "https://log.finalsurge.com/oauth/token", + "https://multisport.todaysplan.com.au/rest/oauth/access_token", + "https://whats.todaysplan.com.au/rest/oauth/access_token", } // brokenAuthHeaderDomains lists broken providers that issue dynamic endpoints. diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go index a047a5f98b6..10299d2ef58 100644 --- a/vendor/golang.org/x/oauth2/oauth2.go +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -3,7 +3,8 @@ // license that can be found in the LICENSE file. // Package oauth2 provides support for making -// OAuth2 authorized and authenticated HTTP requests. +// OAuth2 authorized and authenticated HTTP requests, +// as specified in RFC 6749. // It can additionally grant authorization with Bearer JWT. package oauth2 // import "golang.org/x/oauth2" diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go index 92ac7e2531f..aa0d34f1e0e 100644 --- a/vendor/golang.org/x/oauth2/transport.go +++ b/vendor/golang.org/x/oauth2/transport.go @@ -31,9 +31,17 @@ type Transport struct { } // RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. +// access token from Transport's Source. 
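The go19.go/not_go19.go pair above keeps FindDefaultCredentials and the new CredentialsFromJSON available on both sides of the Go 1.9 type-alias boundary. A minimal sketch of the intended call pattern on Go 1.9+; the cloud-platform scope and the logging are illustrative:

package main

import (
	"log"

	"golang.org/x/net/context"
	"golang.org/x/oauth2"
	"golang.org/x/oauth2/google"
)

func main() {
	ctx := context.Background()

	// FindDefaultCredentials walks the lookup order documented above:
	// $GOOGLE_APPLICATION_CREDENTIALS, the gcloud well-known file, then
	// the App Engine / Compute Engine token sources.
	creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform")
	if err != nil {
		log.Fatalf("no default credentials: %v", err)
	}

	// The TokenSource plugs into oauth2.NewClient, which attaches tokens to
	// requests via the oauth2.Transport whose RoundTrip follows here.
	client := oauth2.NewClient(ctx, creds.TokenSource)
	_ = client
	log.Printf("project: %q", creds.ProjectID)
}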
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + if t.Source == nil { return nil, errors.New("oauth2: Transport's Source is nil") } @@ -46,6 +54,10 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { token.SetAuthHeader(req2) t.setModReq(req, req2) res, err := t.base().RoundTrip(req2) + + // req.Body is assumed to have been closed by the base RoundTripper. + reqBodyClosed = true + if err != nil { t.setModReq(req, nil) return nil, err diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s index d5ed6726cc1..603dd5728c4 100644 --- a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s +++ b/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s @@ -13,17 +13,17 @@ // Just jump to package syscall's implementation for all these functions. // The runtime may know about them. -TEXT ·Syscall(SB),NOSPLIT,$0-64 +TEXT ·Syscall(SB),NOSPLIT,$0-56 JMP syscall·Syscall(SB) -TEXT ·Syscall6(SB),NOSPLIT,$0-88 +TEXT ·Syscall6(SB),NOSPLIT,$0-80 JMP syscall·Syscall6(SB) -TEXT ·Syscall9(SB),NOSPLIT,$0-112 +TEXT ·Syscall9(SB),NOSPLIT,$0-104 JMP syscall·Syscall9(SB) -TEXT ·RawSyscall(SB),NOSPLIT,$0-64 +TEXT ·RawSyscall(SB),NOSPLIT,$0-56 JMP syscall·RawSyscall(SB) -TEXT ·RawSyscall6(SB),NOSPLIT,$0-88 +TEXT ·RawSyscall6(SB),NOSPLIT,$0-80 JMP syscall·RawSyscall6(SB) diff --git a/vendor/golang.org/x/sys/unix/cap_freebsd.go b/vendor/golang.org/x/sys/unix/cap_freebsd.go index 83b6bceab43..df520487737 100644 --- a/vendor/golang.org/x/sys/unix/cap_freebsd.go +++ b/vendor/golang.org/x/sys/unix/cap_freebsd.go @@ -7,7 +7,7 @@ package unix import ( - errorspkg "errors" + "errors" "fmt" ) @@ -60,26 +60,26 @@ func CapRightsSet(rights *CapRights, setrights []uint64) error { n := caparsize(rights) if n < capArSizeMin || n > capArSizeMax { - return errorspkg.New("bad rights size") + return errors.New("bad rights size") } for _, right := range setrights { if caprver(right) != CAP_RIGHTS_VERSION_00 { - return errorspkg.New("bad right version") + return errors.New("bad right version") } i, err := rightToIndex(right) if err != nil { return err } if i >= n { - return errorspkg.New("index overflow") + return errors.New("index overflow") } if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch") + return errors.New("index mismatch") } rights.Rights[i] |= right if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch (after assign)") + return errors.New("index mismatch (after assign)") } } @@ -95,26 +95,26 @@ func CapRightsClear(rights *CapRights, clearrights []uint64) error { n := caparsize(rights) if n < capArSizeMin || n > capArSizeMax { - return errorspkg.New("bad rights size") + return errors.New("bad rights size") } for _, right := range clearrights { if caprver(right) != CAP_RIGHTS_VERSION_00 { - return errorspkg.New("bad right version") + return errors.New("bad right version") } i, err := rightToIndex(right) if err != nil { return err } if i >= n { - return errorspkg.New("index overflow") + return errors.New("index overflow") } if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch") + return errors.New("index mismatch") } rights.Rights[i] &= ^(right & 0x01FFFFFFFFFFFFFF) if capidxbit(rights.Rights[i]) != capidxbit(right) { - return errorspkg.New("index mismatch (after assign)") + return 
errors.New("index mismatch (after assign)") } } @@ -130,22 +130,22 @@ func CapRightsIsSet(rights *CapRights, setrights []uint64) (bool, error) { n := caparsize(rights) if n < capArSizeMin || n > capArSizeMax { - return false, errorspkg.New("bad rights size") + return false, errors.New("bad rights size") } for _, right := range setrights { if caprver(right) != CAP_RIGHTS_VERSION_00 { - return false, errorspkg.New("bad right version") + return false, errors.New("bad right version") } i, err := rightToIndex(right) if err != nil { return false, err } if i >= n { - return false, errorspkg.New("index overflow") + return false, errors.New("index overflow") } if capidxbit(rights.Rights[i]) != capidxbit(right) { - return false, errorspkg.New("index mismatch") + return false, errors.New("index mismatch") } if (rights.Rights[i] & right) != right { return false, nil diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/golang.org/x/sys/unix/fcntl.go similarity index 74% rename from vendor/golang.org/x/sys/unix/flock.go rename to vendor/golang.org/x/sys/unix/fcntl.go index 2994ce75f20..0c58c7e1e5a 100644 --- a/vendor/golang.org/x/sys/unix/flock.go +++ b/vendor/golang.org/x/sys/unix/fcntl.go @@ -12,6 +12,12 @@ import "unsafe" // systems by flock_linux_32bit.go to be SYS_FCNTL64. var fcntl64Syscall uintptr = SYS_FCNTL +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + valptr, _, err := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(arg)) + return int(valptr), err +} + // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, errno := Syscall(fcntl64Syscall, fd, uintptr(cmd), uintptr(unsafe.Pointer(lk))) diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go similarity index 100% rename from vendor/golang.org/x/sys/unix/flock_linux_32bit.go rename to vendor/golang.org/x/sys/unix/fcntl_linux_32bit.go diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 3b5e2c07b39..ddc50a018a6 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -50,6 +50,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -171,6 +172,8 @@ struct ltchars { #include #include #include +#include +#include #include #include #include @@ -189,6 +192,7 @@ struct ltchars { #include #include #include +#include #include #include @@ -383,7 +387,8 @@ ccflags="$@" $2 ~ /^TC[IO](ON|OFF)$/ || $2 ~ /^IN_/ || $2 ~ /^LOCK_(SH|EX|NB|UN)$/ || - $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 ~ /^(AF|SOCK|SO|SOL|IPPROTO|IP|IPV6|ICMP6|TCP|EVFILT|NOTE|EV|SHUT|PROT|MAP|T?PACKET|MSG|SCM|MCL|DT|MADV|PR)_/ || + $2 ~ /^TP_STATUS_/ || $2 ~ /^FALLOC_/ || $2 == "ICMPV6_FILTER" || $2 == "SOMAXCONN" || @@ -425,6 +430,8 @@ ccflags="$@" $2 ~ /^PERF_EVENT_IOC_/ || $2 ~ /^SECCOMP_MODE_/ || $2 ~ /^SPLICE_/ || + $2 !~ /^AUDIT_RECORD_MAGIC/ && + $2 ~ /^[A-Z0-9_]+_MAGIC2?$/ || $2 ~ /^(VM|VMADDR)_/ || $2 ~ /^IOCTL_VM_SOCKETS_/ || $2 ~ /^(TASKSTATS|TS)_/ || @@ -432,10 +439,12 @@ ccflags="$@" $2 ~ /^GENL_/ || $2 ~ /^STATX_/ || $2 ~ /^UTIME_/ || - $2 ~ /^XATTR_(CREATE|REPLACE)/ || + $2 ~ /^XATTR_(CREATE|REPLACE|NO(DEFAULT|FOLLOW|SECURITY)|SHOWCOMPRESSION)/ || $2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ || $2 ~ /^FSOPT_/ || $2 ~ 
/^WDIOC_/ || + $2 ~ /^NFN/ || + $2 ~ /^(HDIO|WIN|SMART)_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)} @@ -505,21 +514,26 @@ echo ')' enum { A = 'A', Z = 'Z', a = 'a', z = 'z' }; // avoid need for single quotes below -int errors[] = { +struct tuple { + int num; + const char *name; +}; + +struct tuple errors[] = { " for i in $errors do - echo -E ' '$i, + echo -E ' {'$i', "'$i'" },' done echo -E " }; -int signals[] = { +struct tuple signals[] = { " for i in $signals do - echo -E ' '$i, + echo -E ' {'$i', "'$i'" },' done # Use -E because on some systems bash builtin interprets \n itself. @@ -527,9 +541,9 @@ int signals[] = { }; static int -intcmp(const void *a, const void *b) +tuplecmp(const void *a, const void *b) { - return *(int*)a - *(int*)b; + return ((struct tuple *)a)->num - ((struct tuple *)b)->num; } int @@ -539,26 +553,34 @@ main(void) char buf[1024], *p; printf("\n\n// Error table\n"); - printf("var errors = [...]string {\n"); - qsort(errors, nelem(errors), sizeof errors[0], intcmp); + printf("var errorList = [...]struct {\n"); + printf("\tnum syscall.Errno\n"); + printf("\tname string\n"); + printf("\tdesc string\n"); + printf("} {\n"); + qsort(errors, nelem(errors), sizeof errors[0], tuplecmp); for(i=0; i 0 && errors[i-1] == e) + e = errors[i].num; + if(i > 0 && errors[i-1].num == e) continue; strcpy(buf, strerror(e)); // lowercase first letter: Bad -> bad, but STREAM -> STREAM. if(A <= buf[0] && buf[0] <= Z && a <= buf[1] && buf[1] <= z) buf[0] += a - A; - printf("\t%d: \"%s\",\n", e, buf); + printf("\t{ %d, \"%s\", \"%s\" },\n", e, errors[i].name, buf); } printf("}\n\n"); printf("\n\n// Signal table\n"); - printf("var signals = [...]string {\n"); - qsort(signals, nelem(signals), sizeof signals[0], intcmp); + printf("var signalList = [...]struct {\n"); + printf("\tnum syscall.Signal\n"); + printf("\tname string\n"); + printf("\tdesc string\n"); + printf("} {\n"); + qsort(signals, nelem(signals), sizeof signals[0], tuplecmp); for(i=0; i 0 && signals[i-1] == e) + e = signals[i].num; + if(i > 0 && signals[i-1].num == e) continue; strcpy(buf, strsignal(e)); // lowercase first letter: Bad -> bad, but STREAM -> STREAM. @@ -568,7 +590,7 @@ main(void) p = strrchr(buf, ":"[0]); if(p) *p = '\0'; - printf("\t%d: \"%s\",\n", e, buf); + printf("\t{ %d, \"%s\", \"%s\" },\n", e, signals[i].name, buf); } printf("}\n\n"); diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go index 23590bda366..7e5c22c4735 100644 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -42,6 +42,10 @@ func main() { log.Fatal(err) } + // Intentionally export __val fields in Fsid and Sigset_t + valRegex := regexp.MustCompile(`type (Fsid|Sigset_t) struct {(\s+)X__val(\s+\S+\s+)}`) + b = valRegex.ReplaceAll(b, []byte("type $1 struct {${2}Val$3}")) + // If we have empty Ptrace structs, we should delete them. Only s390x emits // nonempty Ptrace structs. 
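flock.go is renamed to fcntl.go above and gains FcntlInt next to the existing FcntlFlock. A minimal sketch of using it on Linux to read a descriptor's file-status flags and set O_NONBLOCK; the path is illustrative:

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/etc/hostname") // any open descriptor will do
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// FcntlInt wraps fcntl(2) for commands that take an integer argument.
	flags, err := unix.FcntlInt(f.Fd(), unix.F_GETFL, 0)
	if err != nil {
		log.Fatal(err)
	}
	if _, err := unix.FcntlInt(f.Fd(), unix.F_SETFL, flags|unix.O_NONBLOCK); err != nil {
		log.Fatal(err)
	}
	log.Printf("file status flags: %#x", flags)
}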
ptraceRexexp := regexp.MustCompile(`type Ptrace((Psw|Fpregs|Per) struct {\s*})`) @@ -69,12 +73,9 @@ func main() { removePaddingFieldsRegex := regexp.MustCompile(`Pad_cgo_\d+`) b = removePaddingFieldsRegex.ReplaceAll(b, []byte("_")) - // We refuse to export private fields on s390x - if goarch == "s390x" && goos == "linux" { - // Remove padding, hidden, or unused fields - removeFieldsRegex = regexp.MustCompile(`\bX_\S+`) - b = removeFieldsRegex.ReplaceAll(b, []byte("_")) - } + // Remove padding, hidden, or unused fields + removeFieldsRegex = regexp.MustCompile(`\b(X_\S+|Padding)`) + b = removeFieldsRegex.ReplaceAll(b, []byte("_")) // Remove the first line of warning from cgo b = b[bytes.IndexByte(b, '\n')+1:] diff --git a/vendor/golang.org/x/sys/unix/openbsd_pledge.go b/vendor/golang.org/x/sys/unix/openbsd_pledge.go index db4f72ea9c4..9b1e86a12ba 100644 --- a/vendor/golang.org/x/sys/unix/openbsd_pledge.go +++ b/vendor/golang.org/x/sys/unix/openbsd_pledge.go @@ -13,7 +13,7 @@ import ( ) const ( - SYS_PLEDGE = 108 + _SYS_PLEDGE = 108 ) // Pledge implements the pledge syscall. For more information see pledge(2). @@ -30,7 +30,7 @@ func Pledge(promises string, paths []string) error { } pathsUnsafe = unsafe.Pointer(&pathsPtr[0]) } - _, _, e := syscall.Syscall(SYS_PLEDGE, uintptr(promisesUnsafe), uintptr(pathsUnsafe), 0) + _, _, e := syscall.Syscall(_SYS_PLEDGE, uintptr(promisesUnsafe), uintptr(pathsUnsafe), 0) if e != 0 { return e } diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/golang.org/x/sys/unix/syscall.go index 857d2a42d47..ef35fce8041 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/golang.org/x/sys/unix/syscall.go @@ -11,24 +11,27 @@ // system, set $GOOS and $GOARCH to the desired system. For example, if // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS // to freebsd and $GOARCH to arm. +// // The primary use of this package is inside other packages that provide a more // portable interface to the system, such as "os", "time" and "net". Use // those packages rather than this one if you can. +// // For details of the functions and data types in this package consult // the manuals for the appropriate operating system. +// // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.Errno. package unix // import "golang.org/x/sys/unix" +import "strings" + // ByteSliceFromString returns a NUL-terminated slice of bytes // containing the text of s. If s contains a NUL byte at any // location, it returns (nil, EINVAL). 
func ByteSliceFromString(s string) ([]byte, error) { - for i := 0; i < len(s); i++ { - if s[i] == 0 { - return nil, EINVAL - } + if strings.IndexByte(s, 0) != -1 { + return nil, EINVAL } a := make([]byte, len(s)+1) copy(a, s) diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index d3903edebad..53fb8518237 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -311,47 +311,6 @@ func Getsockname(fd int) (sa Sockaddr, err error) { //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) -func GetsockoptByte(fd, level, opt int) (value byte, err error) { - var n byte - vallen := _Socklen(1) - err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) - return n, err -} - -func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { - vallen := _Socklen(4) - err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) - return value, err -} - -func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { - var value IPMreq - vallen := _Socklen(SizeofIPMreq) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { - var value IPv6Mreq - vallen := _Socklen(SizeofIPv6Mreq) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { - var value IPv6MTUInfo - vallen := _Socklen(SizeofIPv6MTUInfo) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { - var value ICMPv6Filter - vallen := _Socklen(SizeofICMPv6Filter) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - // GetsockoptString returns the string value of the socket option opt for the // socket associated with fd at the given socket level. 
func GetsockoptString(fd, level, opt int) (string, error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index b9598694c6b..79e94767deb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -13,7 +13,7 @@ package unix import ( - errorspkg "errors" + "errors" "syscall" "unsafe" ) @@ -98,7 +98,7 @@ type attrList struct { func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) (attrs [][]byte, err error) { if len(attrBuf) < 4 { - return nil, errorspkg.New("attrBuf too small") + return nil, errors.New("attrBuf too small") } attrList.bitmapCount = attrBitMapCount @@ -134,12 +134,12 @@ func getAttrList(path string, attrList attrList, attrBuf []byte, options uint) ( for i := uint32(0); int(i) < len(dat); { header := dat[i:] if len(header) < 8 { - return attrs, errorspkg.New("truncated attribute header") + return attrs, errors.New("truncated attribute header") } datOff := *(*int32)(unsafe.Pointer(&header[0])) attrLen := *(*uint32)(unsafe.Pointer(&header[4])) if datOff < 0 || uint32(datOff)+attrLen > uint32(len(dat)) { - return attrs, errorspkg.New("truncated results; attrBuf too small") + return attrs, errors.New("truncated results; attrBuf too small") } end := uint32(datOff) + attrLen attrs = append(attrs, dat[datOff:end]) @@ -176,6 +176,88 @@ func Getfsstat(buf []Statfs_t, flags int) (n int, err error) { return } +func xattrPointer(dest []byte) *byte { + // It's only when dest is set to NULL that the OS X implementations of + // getxattr() and listxattr() return the current sizes of the named attributes. + // An empty byte array is not sufficient. To maintain the same behaviour as the + // linux implementation, we wrap around the system calls and pass in NULL when + // dest is empty. + var destp *byte + if len(dest) > 0 { + destp = &dest[0] + } + return destp +} + +//sys getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) + +func Getxattr(path string, attr string, dest []byte) (sz int, err error) { + return getxattr(path, attr, xattrPointer(dest), len(dest), 0, 0) +} + +func Lgetxattr(link string, attr string, dest []byte) (sz int, err error) { + return getxattr(link, attr, xattrPointer(dest), len(dest), 0, XATTR_NOFOLLOW) +} + +//sys setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) + +func Setxattr(path string, attr string, data []byte, flags int) (err error) { + // The parameters for the OS X implementation vary slightly compared to the + // linux system call, specifically the position parameter: + // + // linux: + // int setxattr( + // const char *path, + // const char *name, + // const void *value, + // size_t size, + // int flags + // ); + // + // darwin: + // int setxattr( + // const char *path, + // const char *name, + // void *value, + // size_t size, + // u_int32_t position, + // int options + // ); + // + // position specifies the offset within the extended attribute. In the + // current implementation, only the resource fork extended attribute makes + // use of this argument. For all others, position is reserved. We simply + // default to setting it to zero. 
+ return setxattr(path, attr, xattrPointer(data), len(data), 0, flags) +} + +func Lsetxattr(link string, attr string, data []byte, flags int) (err error) { + return setxattr(link, attr, xattrPointer(data), len(data), 0, flags|XATTR_NOFOLLOW) +} + +//sys removexattr(path string, attr string, options int) (err error) + +func Removexattr(path string, attr string) (err error) { + // We wrap around and explicitly zero out the options provided to the OS X + // implementation of removexattr, we do so for interoperability with the + // linux variant. + return removexattr(path, attr, 0) +} + +func Lremovexattr(link string, attr string) (err error) { + return removexattr(link, attr, XATTR_NOFOLLOW) +} + +//sys listxattr(path string, dest *byte, size int, options int) (sz int, err error) + +func Listxattr(path string, dest []byte) (sz int, err error) { + return listxattr(path, xattrPointer(dest), len(dest), 0) +} + +func Llistxattr(link string, dest []byte) (sz int, err error) { + return listxattr(link, xattrPointer(dest), len(dest), XATTR_NOFOLLOW) +} + func setattrlistTimes(path string, times []Timespec, flags int) error { _p0, err := BytePtrFromString(path) if err != nil { @@ -330,6 +412,7 @@ func Uname(uname *Utsname) error { //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) @@ -446,13 +529,9 @@ func Uname(uname *Utsname) error { // Watchevent // Waitevent // Modwatch -// Getxattr // Fgetxattr -// Setxattr // Fsetxattr -// Removexattr // Fremovexattr -// Listxattr // Flistxattr // Fsctl // Initgroups diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 777860bf09f..b5072de2853 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -251,10 +251,12 @@ func Uname(uname *Utsname) error { //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 89f2c3fc17c..ba9df4ac126 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -12,7 +12,10 @@ package unix -import "unsafe" +import ( + "strings" + "unsafe" +) // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { @@ -134,14 +137,7 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { // Derive extattr namespace and attribute name func xattrnamespace(fullattr string) (ns int, attr string, err error) { - s := -1 - for idx, val := range fullattr { - if val == '.' 
{ - s = idx - break - } - } - + s := strings.IndexByte(fullattr, '.') if s == -1 { return -1, "", ENOATTR } @@ -482,6 +478,7 @@ func Uname(uname *Utsname) error { //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 76cf81f5796..04f38c53ee1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -782,19 +782,6 @@ func Getsockname(fd int) (sa Sockaddr, err error) { return anyToSockaddr(&rsa) } -func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { - vallen := _Socklen(4) - err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) - return value, err -} - -func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { - var value IPMreq - vallen := _Socklen(SizeofIPMreq) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { var value IPMreqn vallen := _Socklen(SizeofIPMreqn) @@ -802,27 +789,6 @@ func GetsockoptIPMreqn(fd, level, opt int) (*IPMreqn, error) { return &value, err } -func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { - var value IPv6Mreq - vallen := _Socklen(SizeofIPv6Mreq) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { - var value IPv6MTUInfo - vallen := _Socklen(SizeofIPv6MTUInfo) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - -func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { - var value ICMPv6Filter - vallen := _Socklen(SizeofICMPv6Filter) - err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) - return &value, err -} - func GetsockoptUcred(fd, level, opt int) (*Ucred, error) { var value Ucred vallen := _Socklen(SizeofUcred) @@ -978,15 +944,17 @@ func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from } var dummy byte if len(oob) > 0 { - var sockType int - sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) - if err != nil { - return - } - // receive at least one normal byte - if sockType != SOCK_DGRAM && len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if len(p) == 0 { + var sockType int + sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) + if err != nil { + return + } + // receive at least one normal byte + if sockType != SOCK_DGRAM { + iov.Base = &dummy + iov.SetLen(1) + } } msg.Control = &oob[0] msg.SetControllen(len(oob)) @@ -1030,15 +998,17 @@ func SendmsgN(fd int, p, oob []byte, to Sockaddr, flags int) (n int, err error) } var dummy byte if len(oob) > 0 { - var sockType int - sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) - if err != nil { - return 0, err - } - // send at least one normal byte - if sockType != SOCK_DGRAM && len(p) == 0 { - iov.Base = &dummy - iov.SetLen(1) + if len(p) == 0 { + var sockType int + sockType, err = GetsockoptInt(fd, SOL_SOCKET, SO_TYPE) + if err != nil { + return 0, err + } + // send at least one normal byte + if sockType != SOCK_DGRAM { + iov.Base = &dummy + iov.SetLen(1) + } } msg.Control = 
&oob[0] msg.SetControllen(len(oob)) @@ -1294,6 +1264,7 @@ func Getpgrp() (pid int) { //sys Mkdirat(dirfd int, path string, mode uint32) (err error) //sys Mknodat(dirfd int, path string, mode uint32, dev int) (err error) //sys Nanosleep(time *Timespec, leftover *Timespec) (err error) +//sys PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) //sys PivotRoot(newroot string, putold string) (err error) = SYS_PIVOT_ROOT //sysnb prlimit(pid int, resource int, newlimit *Rlimit, old *Rlimit) (err error) = SYS_PRLIMIT64 //sys Prctl(option int, arg2 uintptr, arg3 uintptr, arg4 uintptr, arg5 uintptr) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 53d38a53428..d121106323e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -29,7 +29,15 @@ package unix //sys Pread(fd int, p []byte, offset int64) (n int, err error) = SYS_PREAD64 //sys Pwrite(fd int, p []byte, offset int64) (n int, err error) = SYS_PWRITE64 //sys Seek(fd int, offset int64, whence int) (off int64, err error) = SYS_LSEEK -//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) + +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + var ts *Timespec + if timeout != nil { + ts = &Timespec{Sec: timeout.Sec, Nsec: timeout.Usec * 1000} + } + return Pselect(nfd, r, w, e, ts, nil) +} + //sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) //sys Setfsgid(gid int) (err error) //sys Setfsuid(uid int) (err error) @@ -40,7 +48,12 @@ package unix //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) //sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) -//sys Stat(path string, stat *Stat_t) (err error) + +func Stat(path string, stat *Stat_t) (err error) { + // Use fstatat, because Android's seccomp policy blocks stat. + return Fstatat(AT_FDCWD, path, stat, 0) +} + //sys Statfs(path string, buf *Statfs_t) (err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go index c464783d85e..a1e8a609b26 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go @@ -7,6 +7,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) = SYS_EPOLL_PWAIT +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go new file mode 100644 index 00000000000..df9c1237181 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_linux_gccgo.go @@ -0,0 +1,21 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
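The arm64 port above gains an exported Fadvise wrapper (the other Linux ports and, via posix_fadvise, NetBSD follow the same pattern further down). A minimal sketch of advising the kernel before a large sequential read; the file path and advice choice are illustrative:

package main

import (
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	f, err := os.Open("/var/log/syslog") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Advise the kernel that the whole file (offset 0, length 0 = to EOF)
	// will be read sequentially, so it can read ahead more aggressively.
	if err := unix.Fadvise(int(f.Fd()), 0, 0, unix.FADV_SEQUENTIAL); err != nil {
		log.Fatal(err)
	}
	// ... read the file ...
}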
+ +// +build linux +// +build gccgo +// +build 386 arm + +package unix + +import ( + "syscall" + "unsafe" +) + +func seek(fd int, offset int64, whence int) (newoffset int64, err syscall.Errno) { + offsetLow := uint32(offset & 0xffffffff) + offsetHigh := uint32((offset >> 32) & 0xffffffff) + _, _, err = Syscall6(SYS__LLSEEK, uintptr(fd), uintptr(offsetHigh), uintptr(offsetLow), uintptr(unsafe.Pointer(&newoffset)), uintptr(whence), 0) + return newoffset, err +} diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go index 15a69cbdde4..090ed404ab8 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go @@ -9,6 +9,7 @@ package unix //sys Dup2(oldfd int, newfd int) (err error) //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT //sys Fstatfs(fd int, buf *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go index 40b8e4f0fcd..9e16cc9d14e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go @@ -15,6 +15,7 @@ import ( func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) //sys Dup2(oldfd int, newfd int) (err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Ftruncate(fd int, length int64) (err error) = SYS_FTRUNCATE64 //sysnb Getegid() (egid int) @@ -35,7 +36,7 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sysnb Setreuid(ruid int, euid int) (err error) //sys Shutdown(fd int, how int) (err error) -//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) +//sys Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) //sys SyncFileRange(fd int, off int64, n int64, flags int) (err error) //sys Truncate(path string, length int64) (err error) = SYS_TRUNCATE64 diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go index 17c9116e811..6fb8733d674 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go @@ -9,6 +9,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) //sys Dup2(oldfd int, newfd int) (err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_FADVISE64 //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) //sys Fstatat(dirfd int, path string, stat *Stat_t, flags int) (err error) = SYS_NEWFSTATAT diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go index a00f992798d..78c1e0df1db 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go @@ -7,6 +7,7 @@ package unix //sys EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err 
error) = SYS_FADVISE64 //sys Dup2(oldfd int, newfd int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Fstat(fd int, stat *Stat_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go index 71b7078380e..e1a3baa237d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go @@ -233,13 +233,16 @@ func Uname(uname *Utsname) error { //sys Dup(fd int) (nfd int, err error) //sys Dup2(from int, to int) (err error) //sys Exit(code int) +//sys Fadvise(fd int, offset int64, length int64, advice int) (err error) = SYS_POSIX_FADVISE //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) //sysnb Getegid() (egid int) @@ -320,7 +323,6 @@ func Uname(uname *Utsname) error { // __msync13 // __ntp_gettime30 // __posix_chown -// __posix_fadvise50 // __posix_fchown // __posix_lchown // __posix_rename diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go index 37556e775df..614fcf04945 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go @@ -204,10 +204,12 @@ func Uname(uname *Utsname) error { //sys Fchdir(fd int) (err error) //sys Fchflags(fd int, flags int) (err error) //sys Fchmod(fd int, mode uint32) (err error) +//sys Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) //sys Fchown(fd int, uid int, gid int) (err error) //sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys Fsync(fd int) (err error) //sys Ftruncate(fd int, length int64) (err error) @@ -220,6 +222,7 @@ func Uname(uname *Utsname) error { //sysnb Getppid() (ppid int) //sys Getpriority(which int, who int) (prio int, err error) //sysnb Getrlimit(which int, lim *Rlimit) (err error) +//sysnb Getrtable() (rtable int, err error) //sysnb Getrusage(who int, rusage *Rusage) (err error) //sysnb Getsid(pid int) (sid int, err error) //sysnb Gettimeofday(tv *Timeval) (err error) @@ -257,6 +260,7 @@ func Uname(uname *Utsname) error { //sysnb Setresgid(rgid int, egid int, sgid int) (err error) //sysnb Setresuid(ruid int, euid int, suid int) (err error) //sysnb Setrlimit(which int, lim *Rlimit) (err error) +//sysnb Setrtable(rtable int) (err error) //sysnb Setsid() (pid int, err error) //sysnb Settimeofday(tp *Timeval) (err error) //sysnb Setuid(uid int) (err error) @@ -305,7 +309,6 @@ func Uname(uname *Utsname) error { // getlogin // getresgid // getresuid -// getrtable // getthrid // ktrace // lfs_bmapv @@ -341,7 +344,6 @@ func Uname(uname *Utsname) error { // semop // setgroups // setitimer -// setrtable // setsockopt // shmat // shmctl diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go index 
649e67fccc5..9a35334cba4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go @@ -31,3 +31,7 @@ func (msghdr *Msghdr) SetControllen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } + +// SYS___SYSCTL is used by syscall_bsd.go for all BSDs, but in modern versions +// of openbsd/amd64 the syscall is called sysctl instead of __sysctl. +const SYS___SYSCTL = SYS_SYSCTL diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go index eca8d1d09dc..b7629529b38 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go @@ -312,6 +312,12 @@ func UtimesNanoAt(dirfd int, path string, ts []Timespec, flags int) error { //sys fcntl(fd int, cmd int, arg int) (val int, err error) +// FcntlInt performs a fcntl syscall on fd with the provided command and argument. +func FcntlInt(fd uintptr, cmd, arg int) (int, error) { + valptr, _, err := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(arg), 0, 0, 0) + return int(valptr), err +} + // FcntlFlock performs a fcntl syscall for the F_GETLK, F_SETLK or F_SETLKW command. func FcntlFlock(fd uintptr, cmd int, lk *Flock_t) error { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procfcntl)), 3, uintptr(fd), uintptr(cmd), uintptr(unsafe.Pointer(lk)), 0, 0, 0) @@ -595,9 +601,10 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys Fchown(fd int, uid int, gid int) (err error) //sys Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) //sys Fdatasync(fd int) (err error) -//sys Flock(fd int, how int) (err error) +//sys Flock(fd int, how int) (err error) //sys Fpathconf(fd int, name int) (val int, err error) //sys Fstat(fd int, stat *Stat_t) (err error) +//sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) //sys Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) //sys Getdents(fd int, buf []byte, basep *uintptr) (n int, err error) //sysnb Getgid() (gid int) @@ -675,6 +682,7 @@ func Poll(fds []PollFd, timeout int) (n int, err error) { //sys connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_connect //sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error) //sys munmap(addr uintptr, length uintptr) (err error) +//sys sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) = libsendfile.sendfile //sys sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) = libsocket.__xnet_sendto //sys socket(domain int, typ int, proto int) (fd int, err error) = libsocket.__xnet_socket //sysnb socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) = libsocket.__xnet_socketpair diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go index 9d4e7a678f6..91c32ddf02a 100644 --- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go @@ -21,8 +21,3 @@ func (iov *Iovec) SetLen(length int) { func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } - -func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { - // TODO(aram): implement this, see issue 5847. 
- panic("unimplemented") -} diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go index cd8f3a9c286..b835bad0fe4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_unix.go +++ b/vendor/golang.org/x/sys/unix/syscall_unix.go @@ -7,7 +7,9 @@ package unix import ( + "bytes" "runtime" + "sort" "sync" "syscall" "unsafe" @@ -50,14 +52,35 @@ func errnoErr(e syscall.Errno) error { return e } +// ErrnoName returns the error name for error number e. +func ErrnoName(e syscall.Errno) string { + i := sort.Search(len(errorList), func(i int) bool { + return errorList[i].num >= e + }) + if i < len(errorList) && errorList[i].num == e { + return errorList[i].name + } + return "" +} + +// SignalName returns the signal name for signal number s. +func SignalName(s syscall.Signal) string { + i := sort.Search(len(signalList), func(i int) bool { + return signalList[i].num >= s + }) + if i < len(signalList) && signalList[i].num == s { + return signalList[i].name + } + return "" +} + // clen returns the index of the first NULL byte in n or len(n) if n contains no NULL byte. func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } + i := bytes.IndexByte(n, 0) + if i == -1 { + i = len(n) } - return len(n) + return i } // Mmap manager, for use by operating system-specific implementations. @@ -199,6 +222,13 @@ func Getpeername(fd int) (sa Sockaddr, err error) { return anyToSockaddr(&rsa) } +func GetsockoptByte(fd, level, opt int) (value byte, err error) { + var n byte + vallen := _Socklen(1) + err = getsockopt(fd, level, opt, unsafe.Pointer(&n), &vallen) + return n, err +} + func GetsockoptInt(fd, level, opt int) (value int, err error) { var n int32 vallen := _Socklen(4) @@ -206,6 +236,54 @@ func GetsockoptInt(fd, level, opt int) (value int, err error) { return int(n), err } +func GetsockoptInet4Addr(fd, level, opt int) (value [4]byte, err error) { + vallen := _Socklen(4) + err = getsockopt(fd, level, opt, unsafe.Pointer(&value[0]), &vallen) + return value, err +} + +func GetsockoptIPMreq(fd, level, opt int) (*IPMreq, error) { + var value IPMreq + vallen := _Socklen(SizeofIPMreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6Mreq(fd, level, opt int) (*IPv6Mreq, error) { + var value IPv6Mreq + vallen := _Socklen(SizeofIPv6Mreq) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptIPv6MTUInfo(fd, level, opt int) (*IPv6MTUInfo, error) { + var value IPv6MTUInfo + vallen := _Socklen(SizeofIPv6MTUInfo) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptICMPv6Filter(fd, level, opt int) (*ICMPv6Filter, error) { + var value ICMPv6Filter + vallen := _Socklen(SizeofICMPv6Filter) + err := getsockopt(fd, level, opt, unsafe.Pointer(&value), &vallen) + return &value, err +} + +func GetsockoptLinger(fd, level, opt int) (*Linger, error) { + var linger Linger + vallen := _Socklen(SizeofLinger) + err := getsockopt(fd, level, opt, unsafe.Pointer(&linger), &vallen) + return &linger, err +} + +func GetsockoptTimeval(fd, level, opt int) (*Timeval, error) { + var tv Timeval + vallen := _Socklen(unsafe.Sizeof(tv)) + err := getsockopt(fd, level, opt, unsafe.Pointer(&tv), &vallen) + return &tv, err +} + func Recvfrom(fd int, p []byte, flags int) (n int, from Sockaddr, err error) { var rsa RawSockaddrAny var len _Socklen = SizeofSockaddrAny @@ -305,3 +383,12 @@ func SetNonblock(fd 
int, nonblocking bool) (err error) { _, err = fcntl(fd, F_SETFL, flag) return err } + +// Exec calls execve(2), which replaces the calling executable in the process +// tree. argv0 should be the full path to an executable ("/bin/ls") and the +// executable name should also be the first argument in argv (["ls", "-l"]). +// envv are the environment variables that should be passed to the new +// process (["USER=go", "PWD=/tmp"]). +func Exec(argv0 string, argv []string, envv []string) error { + return syscall.Exec(argv0, argv, envv) +} diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/golang.org/x/sys/unix/types_netbsd.go index 10aa9b3a4f3..1494aafcbb5 100644 --- a/vendor/golang.org/x/sys/unix/types_netbsd.go +++ b/vendor/golang.org/x/sys/unix/types_netbsd.go @@ -118,6 +118,17 @@ const ( PathMax = C.PATH_MAX ) +// Advice to Fadvise + +const ( + FADV_NORMAL = C.POSIX_FADV_NORMAL + FADV_RANDOM = C.POSIX_FADV_RANDOM + FADV_SEQUENTIAL = C.POSIX_FADV_SEQUENTIAL + FADV_WILLNEED = C.POSIX_FADV_WILLNEED + FADV_DONTNEED = C.POSIX_FADV_DONTNEED + FADV_NOREUSE = C.POSIX_FADV_NOREUSE +) + // Sockets type RawSockaddrInet4 C.struct_sockaddr_in diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go index dcba88424b1..3b39d7408ad 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go @@ -1473,6 +1473,12 @@ const ( WORDSIZE = 0x20 WSTOPPED = 0x8 WUNTRACED = 0x2 + XATTR_CREATE = 0x2 + XATTR_NODEFAULT = 0x10 + XATTR_NOFOLLOW = 0x1 + XATTR_NOSECURITY = 0x8 + XATTR_REPLACE = 0x4 + XATTR_SHOWCOMPRESSION = 0x20 ) // Errors @@ -1624,146 +1630,154 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is 
not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not 
supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EPWROFF", "device power is off"}, + {83, "EDEVERR", "device error"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EBADEXEC", "bad executable (or shared library)"}, + {86, "EBADARCH", "bad CPU type in executable"}, + {87, "ESHLIBVERS", "shared library version mismatch"}, + {88, "EBADMACHO", "malformed Mach-o file"}, + {89, "ECANCELED", "operation canceled"}, + {90, "EIDRM", "identifier removed"}, + {91, "ENOMSG", "no message of desired type"}, + {92, "EILSEQ", "illegal byte sequence"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EBADMSG", "bad message"}, + {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, + {96, "ENODATA", "no message available on STREAM"}, + {97, "ENOLINK", "ENOLINK (Reserved)"}, + {98, "ENOSR", "no STREAM resources"}, + {99, "ENOSTR", "not a STREAM"}, + {100, "EPROTO", "protocol error"}, + {101, "ETIME", "STREAM ioctl timeout"}, + {102, "EOPNOTSUPP", "operation not supported on socket"}, + {103, "ENOPOLICY", "policy not found"}, + {104, "ENOTRECOVERABLE", "state not recoverable"}, + {105, "EOWNERDEAD", "previous owner died"}, + {106, "EQFULL", "interface output queue is full"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 
25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 1a51c963c89..8fe5547775b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -1473,6 +1473,12 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x8 WUNTRACED = 0x2 + XATTR_CREATE = 0x2 + XATTR_NODEFAULT = 0x10 + XATTR_NOFOLLOW = 0x1 + XATTR_NOSECURITY = 0x8 + XATTR_REPLACE = 0x4 + XATTR_SHOWCOMPRESSION = 0x20 ) // Errors @@ -1624,146 +1630,154 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", 
- 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on 
non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. 
not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EPWROFF", "device power is off"}, + {83, "EDEVERR", "device error"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EBADEXEC", "bad executable (or shared library)"}, + {86, "EBADARCH", "bad CPU type in executable"}, + {87, "ESHLIBVERS", "shared library version mismatch"}, + {88, "EBADMACHO", "malformed Mach-o file"}, + {89, "ECANCELED", "operation canceled"}, + {90, "EIDRM", "identifier removed"}, + {91, "ENOMSG", "no message of desired type"}, + {92, "EILSEQ", "illegal byte sequence"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EBADMSG", "bad message"}, + {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, + {96, "ENODATA", "no message available on STREAM"}, + {97, "ENOLINK", "ENOLINK (Reserved)"}, + {98, "ENOSR", "no STREAM resources"}, + {99, "ENOSTR", "not a STREAM"}, + {100, "EPROTO", "protocol error"}, + {101, "ETIME", "STREAM ioctl timeout"}, + {102, "EOPNOTSUPP", "operation not supported on socket"}, + {103, "ENOPOLICY", "policy not found"}, + {104, "ENOTRECOVERABLE", "state not recoverable"}, + {105, "EOWNERDEAD", "previous owner died"}, + {106, "EQFULL", "interface output queue is full"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff 
--git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index fa135b17c15..7a977770d0a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -1473,6 +1473,12 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x8 WUNTRACED = 0x2 + XATTR_CREATE = 0x2 + XATTR_NODEFAULT = 0x10 + XATTR_NOFOLLOW = 0x1 + XATTR_NOSECURITY = 0x8 + XATTR_REPLACE = 0x4 + XATTR_SHOWCOMPRESSION = 0x20 ) // Errors @@ -1624,146 +1630,154 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", 
"connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EPWROFF", "device power is off"}, + {83, "EDEVERR", "device error"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EBADEXEC", "bad executable (or shared library)"}, + {86, "EBADARCH", "bad CPU type in executable"}, + {87, "ESHLIBVERS", "shared library version mismatch"}, + {88, "EBADMACHO", "malformed Mach-o file"}, + {89, "ECANCELED", "operation canceled"}, + {90, "EIDRM", "identifier removed"}, + {91, "ENOMSG", "no message of desired type"}, + {92, "EILSEQ", "illegal byte sequence"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EBADMSG", "bad message"}, + {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, + {96, "ENODATA", "no message available on STREAM"}, + {97, "ENOLINK", "ENOLINK (Reserved)"}, + {98, "ENOSR", "no STREAM resources"}, + {99, "ENOSTR", "not a STREAM"}, + {100, "EPROTO", "protocol error"}, + {101, "ETIME", "STREAM ioctl timeout"}, + {102, "EOPNOTSUPP", "operation not supported on socket"}, + {103, "ENOPOLICY", "policy not found"}, + {104, "ENOTRECOVERABLE", "state not recoverable"}, + {105, "EOWNERDEAD", "previous owner died"}, + {106, "EQFULL", "interface output queue is full"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, 
"SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 6419c65e13b..6d56d8a059d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -1473,6 +1473,12 @@ const ( WORDSIZE = 0x40 WSTOPPED = 0x8 WUNTRACED = 0x2 + XATTR_CREATE = 0x2 + XATTR_NODEFAULT = 0x10 + XATTR_NOFOLLOW = 0x1 + XATTR_NOSECURITY = 0x8 + XATTR_REPLACE = 0x4 + XATTR_SHOWCOMPRESSION = 0x20 ) // Errors @@ -1624,146 +1630,154 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "resource busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name 
too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "device power is off", - 83: "device error", - 84: "value too large to be stored in data type", - 85: "bad executable (or shared library)", - 86: "bad CPU type in executable", - 87: "shared library version mismatch", - 88: "malformed Mach-o file", - 89: "operation canceled", - 90: "identifier removed", - 91: "no message of desired type", - 92: "illegal byte sequence", - 93: "attribute not found", - 94: "bad message", - 95: "EMULTIHOP (Reserved)", - 96: "no message available on STREAM", - 97: "ENOLINK (Reserved)", - 98: "no STREAM resources", - 99: "not a STREAM", - 100: "protocol error", - 101: "STREAM ioctl timeout", - 102: "operation not supported on socket", - 103: "policy not found", - 104: "state not recoverable", - 105: "previous owner died", - 106: "interface output queue is full", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "ENOTSUP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address 
already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EPWROFF", "device power is off"}, + {83, "EDEVERR", "device error"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EBADEXEC", "bad executable (or shared library)"}, + {86, "EBADARCH", "bad CPU type in executable"}, + {87, "ESHLIBVERS", "shared library version mismatch"}, + {88, "EBADMACHO", "malformed Mach-o file"}, + {89, "ECANCELED", "operation canceled"}, + {90, "EIDRM", "identifier removed"}, + {91, "ENOMSG", "no message of desired type"}, + {92, "EILSEQ", "illegal byte sequence"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EBADMSG", "bad message"}, + {95, "EMULTIHOP", "EMULTIHOP (Reserved)"}, + {96, "ENODATA", "no message available on STREAM"}, + {97, "ENOLINK", "ENOLINK (Reserved)"}, + {98, "ENOSR", "no STREAM resources"}, + {99, "ENOSTR", "not a STREAM"}, + {100, "EPROTO", "protocol error"}, + {101, "ETIME", "STREAM ioctl timeout"}, + {102, "EOPNOTSUPP", "operation not supported on socket"}, + {103, "ENOPOLICY", "policy not found"}, + {104, "ENOTRECOVERABLE", "state not recoverable"}, + {105, "EOWNERDEAD", "previous owner died"}, + {106, "EQFULL", "interface output queue is full"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", +var 
signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index d96015505fb..46a082b6d59 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -980,7 +980,10 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 @@ -1434,142 +1437,150 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset 
by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "operation canceled", - 86: "illegal byte sequence", - 87: "attribute not found", - 88: "programming error", - 89: "bad message", - 90: "multihop attempted", - 91: "link has been severed", - 92: "protocol error", - 93: "no medium found", - 94: "unknown error: 94", - 95: "unknown error: 95", - 96: "unknown error: 96", - 97: "unknown error: 97", - 98: "unknown error: 98", - 99: "unknown error: 99", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol 
family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOMEDIUM", "no medium found"}, + {94, "EUNUSED94", "unknown error: 94"}, + {95, "EUNUSED95", "unknown error: 95"}, + {96, "EUNUSED96", "unknown error: 96"}, + {97, "EUNUSED97", "unknown error: 97"}, + {98, "EUNUSED98", "unknown error: 98"}, + {99, "ELAST", "unknown error: 99"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread Scheduler", - 33: "checkPoint", - 34: "checkPointExit", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + 
{7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread Scheduler"}, + {33, "SIGCKPT", "checkPoint"}, + {34, "SIGCKPTEXIT", "checkPointExit"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index a8b05878e38..2947dc0382e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -1619,138 +1619,146 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too 
many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "operation canceled", - 86: "illegal byte sequence", - 87: "attribute not found", - 88: "programming error", - 89: "bad message", - 90: "multihop attempted", - 91: "link has been severed", - 92: "protocol error", - 93: "capabilities insufficient", - 94: "not permitted in capability mode", - 95: "state not recoverable", - 96: "previous owner died", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, 
+ {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "unknown signal", - 33: "unknown signal", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + 
{23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index cf5f01260e5..c600d012d06 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -1620,138 +1620,146 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "operation canceled", - 86: "illegal byte sequence", - 87: "attribute not found", - 88: "programming error", - 89: "bad message", - 90: "multihop attempted", - 91: "link has been severed", - 92: "protocol error", - 93: "capabilities insufficient", - 94: "not permitted in capability mode", - 95: "state not recoverable", - 96: "previous owner died", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, 
"ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "unknown signal", - 33: "unknown signal", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + 
{28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 9bbb90ad8a0..e8240d2397b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -1628,138 +1628,146 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "operation timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "operation canceled", - 86: "illegal byte sequence", - 87: "attribute not found", - 88: "programming error", - 89: "bad message", - 90: "multihop attempted", - 91: "link has been severed", - 92: "protocol error", - 93: "capabilities insufficient", - 94: "not permitted in capability mode", - 95: "state not recoverable", - 96: "previous owner died", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, 
"ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "ECANCELED", "operation canceled"}, + {86, "EILSEQ", "illegal byte sequence"}, + {87, "ENOATTR", "attribute not found"}, + {88, "EDOOFUS", "programming error"}, + {89, "EBADMSG", "bad message"}, + {90, "EMULTIHOP", "multihop attempted"}, + {91, "ENOLINK", "link has been severed"}, + {92, "EPROTO", "protocol error"}, + {93, "ENOTCAPABLE", "capabilities insufficient"}, + {94, "ECAPMODE", "not permitted in capability mode"}, + {95, "ENOTRECOVERABLE", "state not recoverable"}, + {96, "EOWNERDEAD", "previous owner died"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "suspended (signal)", - 18: "suspended", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "unknown signal", - 33: "unknown signal", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + 
{28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "unknown signal"}, + {33, "SIGLIBRT", "unknown signal"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index fa063740894..ee17d4bd4a5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 BLKFLSBUF = 0x1261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -461,6 +490,7 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FP_XSTATE_MAGIC2 = 0x46505845 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -481,6 +511,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +573,49 @@ 
const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -792,12 +866,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -894,9 +970,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -956,7 +1038,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -991,6 +1075,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1023,6 +1138,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1030,7 +1147,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + 
OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x400 O_ASYNC = 0x2000 @@ -1117,14 +1236,17 @@ const ( PERF_EVENT_IOC_ID = 0x80042407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a PERF_EVENT_IOC_REFRESH = 0x2402 PERF_EVENT_IOC_RESET = 0x2403 PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40042406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1230,6 +1352,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1273,6 +1396,7 @@ const ( PTRACE_POKETEXT = 0x4 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETFPREGS = 0xf PTRACE_SETFPXREGS = 0x13 @@ -1288,6 +1412,11 @@ const ( PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1473,6 +1602,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1557,6 +1688,23 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1667,6 +1815,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1688,6 +1838,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1869,7 +2020,27 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 @@ -1881,6 +2052,7 @@ const ( TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce TUNSETIFF = 0x400454ca TUNSETIFINDEX = 0x400454da @@ -1891,13 +2063,17 @@ const ( 
TUNSETPERSIST = 0x400454cb TUNSETQUEUE = 0x400454d9 TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 TUNSETTXFILTER = 0x400454d1 TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1939,16 +2115,99 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 WORDSIZE = 0x20 WSTOPPED = 0x2 WUNTRACED = 0x2 + X86_FXSR_MAGIC = 0x0 XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2128,171 +2387,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", 
- 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to 
RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too 
many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", 
"hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index eb2a22f6501..64ab9f40ade 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 BLKFLSBUF = 0x1261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada 
ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -461,6 +490,7 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FP_XSTATE_MAGIC2 = 0x46505845 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -481,6 +511,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +573,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -792,12 +866,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -894,9 +970,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -956,7 +1038,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -991,6 +1075,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 
0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1023,6 +1138,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1030,7 +1147,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x400 O_ASYNC = 0x2000 @@ -1117,14 +1236,17 @@ const ( PERF_EVENT_IOC_ID = 0x80082407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a PERF_EVENT_IOC_REFRESH = 0x2402 PERF_EVENT_IOC_RESET = 0x2403 PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1230,6 +1352,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ARCH_PRCTL = 0x1e PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 @@ -1274,6 +1397,7 @@ const ( PTRACE_POKETEXT = 0x4 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETFPREGS = 0xf PTRACE_SETFPXREGS = 0x13 @@ -1289,6 +1413,11 @@ const ( PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1474,6 +1603,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1558,6 +1689,23 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1668,6 +1816,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1689,6 +1839,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1870,7 +2021,27 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 
0x100 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 @@ -1882,6 +2053,7 @@ const ( TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce TUNSETIFF = 0x400454ca TUNSETIFINDEX = 0x400454da @@ -1892,13 +2064,17 @@ const ( TUNSETPERSIST = 0x400454cb TUNSETQUEUE = 0x400454d9 TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 TUNSETTXFILTER = 0x400454d1 TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1940,6 +2116,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -1949,7 +2205,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + 
XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2129,171 +2387,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already 
connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams 
resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table 
-var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 37d212ca494..6ae0ac6f862 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80041270 BLKBSZSET = 0x40041271 BLKFLSBUF = 0x1261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + 
CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -481,6 +510,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +572,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -792,12 +865,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -893,9 +968,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 
0x800 @@ -955,7 +1036,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -990,6 +1073,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1022,6 +1136,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1029,7 +1145,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x400 O_ASYNC = 0x2000 @@ -1116,14 +1234,17 @@ const ( PERF_EVENT_IOC_ID = 0x80042407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a PERF_EVENT_IOC_REFRESH = 0x2402 PERF_EVENT_IOC_RESET = 0x2403 PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40042406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1229,6 +1350,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1278,6 +1400,7 @@ const ( PTRACE_POKETEXT = 0x4 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETCRUNCHREGS = 0x1a PTRACE_SETFPREGS = 0xf @@ -1296,6 +1419,11 @@ const ( PT_DATA_ADDR = 0x10004 PT_TEXT_ADDR = 0x10000 PT_TEXT_END_ADDR = 0x10008 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1481,6 +1609,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1565,6 +1695,23 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + 
SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1675,6 +1822,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1696,6 +1845,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1877,7 +2027,27 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x400854d5 TUNDETACHFILTER = 0x400854d6 @@ -1889,6 +2059,7 @@ const ( TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce TUNSETIFF = 0x400454ca TUNSETIFINDEX = 0x400454da @@ -1899,13 +2070,17 @@ const ( TUNSETPERSIST = 0x400454cb TUNSETQUEUE = 0x400454d9 TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 TUNSETTXFILTER = 0x400454d1 TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1947,6 +2122,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + 
WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -1956,7 +2211,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2136,171 +2393,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on 
non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, 
"EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, 
"EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 51d84a35c30..f58450bf3d3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ 
-164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 BLKFLSBUF = 0x1261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -393,6 +416,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -446,10 +470,15 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 EXTRA_MAGIC = 0x45585401 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -463,6 +492,7 @@ const ( FF1 = 0x8000 FFDLY = 0x8000 FLUSHO = 0x1000 + FPSIMD_MAGIC = 0x46508001 FS_ENCRYPTION_MODE_AES_128_CBC = 0x5 FS_ENCRYPTION_MODE_AES_128_CTS = 0x6 FS_ENCRYPTION_MODE_AES_256_CBC = 0x3 @@ -483,6 +513,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -544,6 +575,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + 
HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -794,12 +868,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -895,9 +971,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -957,7 +1039,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -992,6 +1076,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1024,6 +1139,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1031,7 +1148,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x400 O_ASYNC = 0x2000 @@ -1118,14 +1237,17 @@ const ( PERF_EVENT_IOC_ID = 0x80082407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a PERF_EVENT_IOC_REFRESH = 0x2402 PERF_EVENT_IOC_RESET = 0x2403 PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1231,6 +1353,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1270,6 +1393,7 @@ const ( PTRACE_POKETEXT = 0x4 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETOPTIONS = 0x4200 PTRACE_SETREGS = 0xd @@ -1279,6 +1403,11 @@ const ( PTRACE_SINGLESTEP = 0x9 
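The *_SUPER_MAGIC and *_MAGIC constants added throughout these zerrors files are the kernel's filesystem magic numbers, i.e. the values statfs(2) reports in f_type, so callers can identify the filesystem backing a path without parsing /proc/mounts. A minimal sketch, assuming linux/amd64 (where Statfs_t.Type is int64) and this updated vendor snapshot:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var st unix.Statfs_t
	if err := unix.Statfs("/proc", &st); err != nil {
		log.Fatal(err)
	}
	// The magic constants are untyped, so they compare directly against
	// Statfs_t.Type (int64 on linux/amd64).
	switch st.Type {
	case unix.PROC_SUPER_MAGIC:
		fmt.Println("/proc is procfs")
	case unix.TMPFS_MAGIC:
		fmt.Println("/proc is tmpfs")
	default:
		fmt.Printf("unrecognized filesystem magic %#x\n", st.Type)
	}
}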
PTRACE_SYSCALL = 0x18 PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1464,6 +1593,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1548,6 +1679,23 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1658,6 +1806,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1679,6 +1829,8 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SVE_MAGIC = 0x53564501 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1860,7 +2012,27 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 @@ -1872,6 +2044,7 @@ const ( TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce TUNSETIFF = 0x400454ca TUNSETIFINDEX = 0x400454da @@ -1882,13 +2055,17 @@ const ( TUNSETPERSIST = 0x400454cb TUNSETQUEUE = 0x400454d9 TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 TUNSETTXFILTER = 0x400454d1 TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -1930,6 +2107,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + 
WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -1939,7 +2196,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2119,171 +2378,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of 
streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, 
"EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on 
reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", 
"power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 8aec95d6c24..465ff2f0f3c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 BLKFLSBUF = 0x20001261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -481,6 +510,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +572,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS 
= 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -792,12 +865,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -894,9 +969,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -956,7 +1037,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -991,6 +1074,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1023,6 +1137,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1030,7 +1146,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x1000 @@ -1117,14 +1235,17 @@ const ( PERF_EVENT_IOC_ID = 0x40042407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a PERF_EVENT_IOC_REFRESH = 0x20002402 PERF_EVENT_IOC_RESET = 0x20002403 PERF_EVENT_IOC_SET_BPF = 0x80042408 PERF_EVENT_IOC_SET_FILTER = 0x80042406 
PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1230,6 +1351,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1278,6 +1400,7 @@ const ( PTRACE_POKETEXT_3264 = 0xc2 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETFPREGS = 0xf PTRACE_SETOPTIONS = 0x4200 @@ -1290,6 +1413,11 @@ const ( PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x6 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1475,6 +1603,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1559,6 +1689,23 @@ const ( SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 @@ -1670,6 +1817,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1691,6 +1840,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1873,7 +2023,27 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 @@ -1885,6 +2055,7 @@ const ( TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce TUNSETIFF = 0x800454ca TUNSETIFINDEX = 0x800454da @@ -1895,13 +2066,17 @@ const ( TUNSETPERSIST = 0x800454cb TUNSETQUEUE = 0x800454d9 TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 TUNSETTXFILTER = 0x800454d1 TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD 
= 0xd VEOF = 0x10 VEOL = 0x11 @@ -1944,6 +2119,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -1953,7 +2208,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2135,174 +2392,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not 
synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", 
"input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "resource deadlock avoided"}, + {46, "ENOLCK", "no locks available"}, + {50, "EBADE", "invalid exchange"}, + {51, "EBADR", "invalid request descriptor"}, + {52, "EXFULL", "exchange full"}, + {53, "ENOANO", "no anode"}, + {54, "EBADRQC", "invalid request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "bad message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in too many shared libraries"}, + {87, "ELIBEXEC", "cannot exec a shared library directly"}, + {88, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {89, "ENOSYS", "function not implemented"}, + {90, "ELOOP", "too many levels of symbolic links"}, + {91, "ERESTART", "interrupted system call should be restarted"}, + {92, "ESTRPIPE", "streams pipe error"}, + {93, "ENOTEMPTY", "directory not 
empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "protocol not available"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "ENOTSUP", "operation not supported"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection on reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {135, "EUCLEAN", "structure needs cleaning"}, + {137, "ENOTNAM", "not a XENIX named type file"}, + {138, "ENAVAIL", "no XENIX semaphores available"}, + {139, "EISNAM", "is a named type file"}, + {140, "EREMOTEIO", "remote I/O error"}, + {141, "EINIT", "unknown error 141"}, + {142, "EREMDEV", "unknown error 142"}, + {143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale file handle"}, + {158, "ECANCELED", "operation canceled"}, + {159, "ENOMEDIUM", "no medium found"}, + {160, "EMEDIUMTYPE", "wrong medium type"}, + {161, "ENOKEY", "required key not available"}, + {162, "EKEYEXPIRED", "key has expired"}, + {163, "EKEYREVOKED", "key has been revoked"}, + {164, "EKEYREJECTED", "key was rejected by service"}, + {165, "EOWNERDEAD", "owner died"}, + {166, "ENOTRECOVERABLE", "state not recoverable"}, + {167, "ERFKILL", "operation not possible due to RF-kill"}, + {168, "EHWPOISON", "memory page has hardware error"}, + {1133, "EDQUOT", "disk quota exceeded"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", 
"aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGCHLD", "child exited"}, + {19, "SIGPWR", "power failure"}, + {20, "SIGWINCH", "window changed"}, + {21, "SIGURG", "urgent I/O condition"}, + {22, "SIGIO", "I/O possible"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual timer expired"}, + {29, "SIGPROF", "profiling timer expired"}, + {30, "SIGXCPU", "CPU time limit exceeded"}, + {31, "SIGXFSZ", "file size limit exceeded"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 423f48ae09b..37e851aabe4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 BLKFLSBUF = 0x20001261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 
ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -481,6 +510,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +572,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -792,12 +865,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -894,9 +969,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -956,7 +1037,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -991,6 +1074,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + 
NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1023,6 +1137,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1030,7 +1146,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x1000 @@ -1117,14 +1235,17 @@ const ( PERF_EVENT_IOC_ID = 0x40082407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a PERF_EVENT_IOC_REFRESH = 0x20002402 PERF_EVENT_IOC_RESET = 0x20002403 PERF_EVENT_IOC_SET_BPF = 0x80042408 PERF_EVENT_IOC_SET_FILTER = 0x80082406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1230,6 +1351,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1278,6 +1400,7 @@ const ( PTRACE_POKETEXT_3264 = 0xc2 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETFPREGS = 0xf PTRACE_SETOPTIONS = 0x4200 @@ -1290,6 +1413,11 @@ const ( PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x6 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1475,6 +1603,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1559,6 +1689,23 @@ const ( SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 @@ -1670,6 +1817,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1691,6 +1840,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1873,7 +2023,27 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + 
TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 @@ -1885,6 +2055,7 @@ const ( TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce TUNSETIFF = 0x800454ca TUNSETIFINDEX = 0x800454da @@ -1895,13 +2066,17 @@ const ( TUNSETPERSIST = 0x800454cb TUNSETQUEUE = 0x800454d9 TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 TUNSETTXFILTER = 0x800454d1 TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1944,6 +2119,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -1953,7 +2208,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2135,174 +2392,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: 
"argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport 
endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "resource deadlock avoided"}, + {46, "ENOLCK", "no locks available"}, + {50, "EBADE", "invalid exchange"}, + {51, "EBADR", "invalid request descriptor"}, + {52, "EXFULL", "exchange full"}, + {53, "ENOANO", "no anode"}, + {54, "EBADRQC", "invalid request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EMULTIHOP", 
"multihop attempted"}, + {77, "EBADMSG", "bad message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in too many shared libraries"}, + {87, "ELIBEXEC", "cannot exec a shared library directly"}, + {88, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {89, "ENOSYS", "function not implemented"}, + {90, "ELOOP", "too many levels of symbolic links"}, + {91, "ERESTART", "interrupted system call should be restarted"}, + {92, "ESTRPIPE", "streams pipe error"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "protocol not available"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "ENOTSUP", "operation not supported"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection on reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {135, "EUCLEAN", "structure needs cleaning"}, + {137, "ENOTNAM", "not a XENIX named type file"}, + {138, "ENAVAIL", "no XENIX semaphores available"}, + {139, "EISNAM", "is a named type file"}, + {140, "EREMOTEIO", "remote I/O error"}, + {141, "EINIT", "unknown error 141"}, + {142, "EREMDEV", "unknown error 142"}, + {143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale file handle"}, + {158, "ECANCELED", "operation canceled"}, + {159, "ENOMEDIUM", "no medium found"}, + {160, "EMEDIUMTYPE", "wrong medium type"}, + {161, "ENOKEY", "required key not available"}, + {162, "EKEYEXPIRED", "key has expired"}, + {163, "EKEYREVOKED", "key has been revoked"}, + {164, "EKEYREJECTED", "key was rejected by service"}, + {165, "EOWNERDEAD", "owner died"}, + {166, "ENOTRECOVERABLE", "state not recoverable"}, + {167, "ERFKILL", "operation not possible due to RF-kill"}, + {168, "EHWPOISON", "memory page has hardware error"}, + {1133, "EDQUOT", "disk quota exceeded"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint 
trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGCHLD", "child exited"}, + {19, "SIGPWR", "power failure"}, + {20, "SIGWINCH", "window changed"}, + {21, "SIGURG", "urgent I/O condition"}, + {22, "SIGIO", "I/O possible"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual timer expired"}, + {29, "SIGPROF", "profiling timer expired"}, + {30, "SIGXCPU", "CPU time limit exceeded"}, + {31, "SIGXFSZ", "file size limit exceeded"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 5e406070180..1131d3ca165 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 BLKFLSBUF = 0x20001261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( 
CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -481,6 +510,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +572,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -792,12 +865,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -894,9 +969,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -956,7 +1037,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + 
MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -991,6 +1074,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1023,6 +1137,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1030,7 +1146,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x1000 @@ -1117,14 +1235,17 @@ const ( PERF_EVENT_IOC_ID = 0x40082407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a PERF_EVENT_IOC_REFRESH = 0x20002402 PERF_EVENT_IOC_RESET = 0x20002403 PERF_EVENT_IOC_SET_BPF = 0x80042408 PERF_EVENT_IOC_SET_FILTER = 0x80082406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1230,6 +1351,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1278,6 +1400,7 @@ const ( PTRACE_POKETEXT_3264 = 0xc2 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETFPREGS = 0xf PTRACE_SETOPTIONS = 0x4200 @@ -1290,6 +1413,11 @@ const ( PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x6 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1475,6 +1603,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1559,6 +1689,23 @@ const ( SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 
0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 @@ -1670,6 +1817,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1691,6 +1840,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1873,7 +2023,27 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 @@ -1885,6 +2055,7 @@ const ( TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce TUNSETIFF = 0x800454ca TUNSETIFINDEX = 0x800454da @@ -1895,13 +2066,17 @@ const ( TUNSETPERSIST = 0x800454cb TUNSETQUEUE = 0x800454d9 TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 TUNSETTXFILTER = 0x800454d1 TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1944,6 +2119,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 
0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -1953,7 +2208,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2135,174 +2392,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too 
long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "resource deadlock 
avoided"}, + {46, "ENOLCK", "no locks available"}, + {50, "EBADE", "invalid exchange"}, + {51, "EBADR", "invalid request descriptor"}, + {52, "EXFULL", "exchange full"}, + {53, "ENOANO", "no anode"}, + {54, "EBADRQC", "invalid request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "bad message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in too many shared libraries"}, + {87, "ELIBEXEC", "cannot exec a shared library directly"}, + {88, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {89, "ENOSYS", "function not implemented"}, + {90, "ELOOP", "too many levels of symbolic links"}, + {91, "ERESTART", "interrupted system call should be restarted"}, + {92, "ESTRPIPE", "streams pipe error"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "protocol not available"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "ENOTSUP", "operation not supported"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection on reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {135, "EUCLEAN", "structure needs cleaning"}, + {137, "ENOTNAM", "not a XENIX named type file"}, + {138, "ENAVAIL", "no XENIX semaphores available"}, + {139, "EISNAM", "is a named type file"}, + {140, "EREMOTEIO", "remote I/O error"}, + {141, "EINIT", "unknown error 141"}, + {142, "EREMDEV", "unknown error 142"}, + {143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host 
is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale file handle"}, + {158, "ECANCELED", "operation canceled"}, + {159, "ENOMEDIUM", "no medium found"}, + {160, "EMEDIUMTYPE", "wrong medium type"}, + {161, "ENOKEY", "required key not available"}, + {162, "EKEYEXPIRED", "key has expired"}, + {163, "EKEYREVOKED", "key has been revoked"}, + {164, "EKEYREJECTED", "key was rejected by service"}, + {165, "EOWNERDEAD", "owner died"}, + {166, "ENOTRECOVERABLE", "state not recoverable"}, + {167, "ERFKILL", "operation not possible due to RF-kill"}, + {168, "EHWPOISON", "memory page has hardware error"}, + {1133, "EDQUOT", "disk quota exceeded"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGCHLD", "child exited"}, + {19, "SIGPWR", "power failure"}, + {20, "SIGWINCH", "window changed"}, + {21, "SIGURG", "urgent I/O condition"}, + {22, "SIGIO", "I/O possible"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual timer expired"}, + {29, "SIGPROF", "profiling timer expired"}, + {30, "SIGXCPU", "CPU time limit exceeded"}, + {31, "SIGXFSZ", "file size limit exceeded"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index b9b9d6374fe..d04a43b2bc7 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID 
= 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40041270 BLKBSZSET = 0x80041271 BLKFLSBUF = 0x20001261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x80 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -481,6 +510,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +572,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + 
HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -792,12 +865,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -894,9 +969,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -956,7 +1037,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -991,6 +1074,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1023,6 +1137,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1030,7 +1146,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x1000 @@ -1117,14 +1235,17 @@ const ( PERF_EVENT_IOC_ID = 0x40042407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc004240a PERF_EVENT_IOC_REFRESH = 0x20002402 PERF_EVENT_IOC_RESET = 0x20002403 PERF_EVENT_IOC_SET_BPF = 0x80042408 PERF_EVENT_IOC_SET_FILTER = 0x80042406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1230,6 +1351,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1278,6 +1400,7 @@ const ( PTRACE_POKETEXT_3264 = 0xc2 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETFPREGS = 0xf PTRACE_SETOPTIONS = 0x4200 @@ -1290,6 +1413,11 @@ const ( PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 PTRACE_TRACEME = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 
0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x6 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1475,6 +1603,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1559,6 +1689,23 @@ const ( SIOCSPGRP = 0x80047308 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x1 @@ -1670,6 +1817,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1691,6 +1840,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1873,7 +2023,27 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x800854d5 TUNDETACHFILTER = 0x800854d6 @@ -1885,6 +2055,7 @@ const ( TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce TUNSETIFF = 0x800454ca TUNSETIFINDEX = 0x800454da @@ -1895,13 +2066,17 @@ const ( TUNSETPERSIST = 0x800454cb TUNSETQUEUE = 0x800454d9 TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 TUNSETTXFILTER = 0x800454d1 TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0xd VEOF = 0x10 VEOL = 0x11 @@ -1944,6 +2119,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + 
WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -1953,7 +2208,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2135,174 +2392,182 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "resource deadlock avoided", - 46: "no locks available", - 50: "invalid exchange", - 51: "invalid request descriptor", - 52: "exchange full", - 53: "no anode", - 54: "invalid request code", - 55: "invalid slot", - 56: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: 
"communication error on send", - 71: "protocol error", - 73: "RFS specific error", - 74: "multihop attempted", - 77: "bad message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in too many shared libraries", - 87: "cannot exec a shared library directly", - 88: "invalid or incomplete multibyte or wide character", - 89: "function not implemented", - 90: "too many levels of symbolic links", - 91: "interrupted system call should be restarted", - 92: "streams pipe error", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "protocol not available", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported", - 123: "protocol family not supported", - 124: "address family not supported by protocol", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 128: "network is unreachable", - 129: "network dropped connection on reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 135: "structure needs cleaning", - 137: "not a XENIX named type file", - 138: "no XENIX semaphores available", - 139: "is a named type file", - 140: "remote I/O error", - 141: "unknown error 141", - 142: "unknown error 142", - 143: "cannot send after transport endpoint shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale file handle", - 158: "operation canceled", - 159: "no medium found", - 160: "wrong medium type", - 161: "required key not available", - 162: "key has expired", - 163: "key has been revoked", - 164: "key was rejected by service", - 165: "owner died", - 166: "state not recoverable", - 167: "operation not possible due to RF-kill", - 168: "memory page has hardware error", - 1133: "disk quota exceeded", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, 
"EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "resource deadlock avoided"}, + {46, "ENOLCK", "no locks available"}, + {50, "EBADE", "invalid exchange"}, + {51, "EBADR", "invalid request descriptor"}, + {52, "EXFULL", "exchange full"}, + {53, "ENOANO", "no anode"}, + {54, "EBADRQC", "invalid request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "bad message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in too many shared libraries"}, + {87, "ELIBEXEC", "cannot exec a shared library directly"}, + {88, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {89, "ENOSYS", "function not implemented"}, + {90, "ELOOP", "too many levels of symbolic links"}, + {91, "ERESTART", "interrupted system call should be restarted"}, + {92, "ESTRPIPE", "streams pipe error"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "protocol not available"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "ENOTSUP", "operation not supported"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is 
unreachable"}, + {129, "ENETRESET", "network dropped connection on reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {135, "EUCLEAN", "structure needs cleaning"}, + {137, "ENOTNAM", "not a XENIX named type file"}, + {138, "ENAVAIL", "no XENIX semaphores available"}, + {139, "EISNAM", "is a named type file"}, + {140, "EREMOTEIO", "remote I/O error"}, + {141, "EINIT", "unknown error 141"}, + {142, "EREMDEV", "unknown error 142"}, + {143, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale file handle"}, + {158, "ECANCELED", "operation canceled"}, + {159, "ENOMEDIUM", "no medium found"}, + {160, "EMEDIUMTYPE", "wrong medium type"}, + {161, "ENOKEY", "required key not available"}, + {162, "EKEYEXPIRED", "key has expired"}, + {163, "EKEYREVOKED", "key has been revoked"}, + {164, "EKEYREJECTED", "key was rejected by service"}, + {165, "EOWNERDEAD", "owner died"}, + {166, "ENOTRECOVERABLE", "state not recoverable"}, + {167, "ERFKILL", "operation not possible due to RF-kill"}, + {168, "EHWPOISON", "memory page has hardware error"}, + {1133, "EDQUOT", "disk quota exceeded"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "user defined signal 1", - 17: "user defined signal 2", - 18: "child exited", - 19: "power failure", - 20: "window changed", - 21: "urgent I/O condition", - 22: "I/O possible", - 23: "stopped (signal)", - 24: "stopped", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual timer expired", - 29: "profiling timer expired", - 30: "CPU time limit exceeded", - 31: "file size limit exceeded", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user defined signal 1"}, + {17, "SIGUSR2", "user defined signal 2"}, + {18, "SIGCHLD", "child exited"}, + {19, "SIGPWR", "power failure"}, + {20, "SIGWINCH", "window changed"}, + {21, "SIGURG", "urgent I/O condition"}, + {22, "SIGIO", "I/O possible"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty 
output)"}, + {28, "SIGVTALRM", "virtual timer expired"}, + {29, "SIGPROF", "profiling timer expired"}, + {30, "SIGXCPU", "CPU time limit exceeded"}, + {31, "SIGXFSZ", "file size limit exceeded"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 509418ef2e0..710410efdd8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x17 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x16 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 BLKFLSBUF = 0x20001261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x8000 BSDLY = 0x8000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0xff CBAUDEX = 0x0 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0xff0000 CLOCAL = 0x8000 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 CR3 = 0x3000 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x3000 CREAD = 0x800 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x1 ECHONL = 0x10 ECHOPRT = 0x20 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -481,6 +510,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +572,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + 
HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x4000 IBSHIFT = 0x10 ICANON = 0x100 @@ -792,12 +865,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x80 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x1000 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -893,9 +968,15 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -955,7 +1036,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -990,6 +1073,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 @@ -1024,6 +1138,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1031,7 +1147,9 @@ const ( ONLCR = 0x2 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x400 O_ASYNC = 0x2000 @@ -1118,14 +1236,17 @@ const ( PERF_EVENT_IOC_ID = 0x40082407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 + 
PERF_EVENT_IOC_QUERY_BPF = 0xc008240a PERF_EVENT_IOC_REFRESH = 0x20002402 PERF_EVENT_IOC_RESET = 0x20002403 PERF_EVENT_IOC_SET_BPF = 0x80042408 PERF_EVENT_IOC_SET_FILTER = 0x80082406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1232,6 +1353,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1277,6 +1399,7 @@ const ( PTRACE_POKETEXT = 0x4 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETEVRREGS = 0x15 PTRACE_SETFPREGS = 0xf @@ -1346,6 +1469,11 @@ const ( PT_VSR0 = 0x96 PT_VSR31 = 0xd4 PT_XER = 0x25 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1531,6 +1659,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1615,6 +1745,23 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1725,6 +1872,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1746,6 +1895,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1931,7 +2081,27 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x400000 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 @@ -1943,6 +2113,7 @@ const ( TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce TUNSETIFF = 0x800454ca TUNSETIFINDEX = 0x800454da @@ -1953,13 +2124,17 @@ const ( TUNSETPERSIST = 0x800454cb TUNSETQUEUE = 0x800454d9 TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 TUNSETTXFILTER = 0x800454d1 TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + 
UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -2001,6 +2176,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2010,7 +2265,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4000 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0xc00 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2190,172 +2447,180 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", 
- 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 58: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + 
{3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {58, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, 
"ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, 
"SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 26afbb8dedc..c1c1c01bcf5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x17 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x16 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x40081270 BLKBSZSET = 0x80081271 BLKFLSBUF = 0x20001261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x8000 BSDLY = 0x8000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0xff CBAUDEX = 0x0 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0xff0000 CLOCAL = 0x8000 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x1000 CR2 = 0x2000 CR3 = 0x3000 + CRAMFS_MAGIC = 0x28cd3d45 CRDLY = 0x3000 CREAD = 0x800 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x400 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x1 ECHONL = 0x10 ECHOPRT = 0x20 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e 
ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -481,6 +510,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +572,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x4000 IBSHIFT = 0x10 ICANON = 0x100 @@ -792,12 +865,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x80 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x1000 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -893,9 +968,15 @@ const ( MCL_CURRENT = 0x2000 MCL_FUTURE = 0x4000 MCL_ONFAULT = 0x8000 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -955,7 +1036,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -990,6 +1073,37 @@ const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + 
NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 @@ -1024,6 +1138,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1031,7 +1147,9 @@ const ( ONLCR = 0x2 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x400 O_ASYNC = 0x2000 @@ -1118,14 +1236,17 @@ const ( PERF_EVENT_IOC_ID = 0x40082407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x80042409 PERF_EVENT_IOC_PERIOD = 0x80082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a PERF_EVENT_IOC_REFRESH = 0x20002402 PERF_EVENT_IOC_RESET = 0x20002403 PERF_EVENT_IOC_SET_BPF = 0x80042408 PERF_EVENT_IOC_SET_FILTER = 0x80082406 PERF_EVENT_IOC_SET_OUTPUT = 0x20002405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1232,6 +1353,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1277,6 +1399,7 @@ const ( PTRACE_POKETEXT = 0x4 PTRACE_POKEUSR = 0x6 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETEVRREGS = 0x15 PTRACE_SETFPREGS = 0xf @@ -1346,6 +1469,11 @@ const ( PT_VSR0 = 0x96 PT_VSR31 = 0xd4 PT_XER = 0x25 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1531,6 +1659,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1615,6 +1745,23 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1725,6 +1872,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1746,6 +1895,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1931,7 +2081,27 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x400000 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + 
TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x801054d5 TUNDETACHFILTER = 0x801054d6 @@ -1943,6 +2113,7 @@ const ( TUNGETVNETHDRSZ = 0x400454d7 TUNGETVNETLE = 0x400454dd TUNSETDEBUG = 0x800454c9 + TUNSETFILTEREBPF = 0x400454e1 TUNSETGROUP = 0x800454ce TUNSETIFF = 0x800454ca TUNSETIFINDEX = 0x800454da @@ -1953,13 +2124,17 @@ const ( TUNSETPERSIST = 0x800454cb TUNSETQUEUE = 0x800454d9 TUNSETSNDBUF = 0x800454d4 + TUNSETSTEERINGEBPF = 0x400454e0 TUNSETTXFILTER = 0x800454d1 TUNSETVNETBE = 0x800454de TUNSETVNETHDRSZ = 0x800454d8 TUNSETVNETLE = 0x800454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0x10 VEOF = 0x4 VEOL = 0x6 @@ -2001,6 +2176,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2010,7 +2265,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4000 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0xc00 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2190,172 +2447,180 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: 
"argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 58: "file locking deadlock error", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation 
now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", "invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {58, "EDEADLOCK", "file locking deadlock error"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", 
"communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, "EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 
12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index eeb9941debd..c7583e15eb4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -11,6 +11,11 @@ package unix import "syscall" const ( + AAFS_MAGIC = 0x5a3c69f0 + ADFS_SUPER_MAGIC = 0xadf5 + AFFS_SUPER_MAGIC = 0xadff + AFS_FS_MAGIC = 0x6b414653 + AFS_SUPER_MAGIC = 0x5346414f AF_ALG = 0x26 AF_APPLETALK = 0x5 AF_ASH = 0x12 @@ -66,6 +71,7 @@ const ( ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 + ANON_INODE_FS_MAGIC = 0x9041934 ARPHRD_6LOWPAN = 0x339 ARPHRD_ADAPT = 0x108 ARPHRD_APPLETLK = 0x8 @@ -133,6 +139,7 @@ const ( ARPHRD_VOID = 0xffff ARPHRD_VSOCKMON = 0x33a ARPHRD_X25 = 0x10f + AUTOFS_SUPER_MAGIC = 0x187 B0 = 0x0 B1000000 = 0x1008 B110 = 0x3 @@ -164,6 +171,9 @@ const ( B75 = 0x2 B921600 = 0x1007 B9600 = 0xd + BALLOON_KVM_MAGIC = 0x13661366 + BDEVFS_MAGIC = 0x62646576 + BINFMTFS_MAGIC = 0x42494e4d BLKBSZGET = 0x80081270 BLKBSZSET = 0x40081271 BLKFLSBUF = 0x1261 @@ -188,6 +198,7 @@ const ( BPF_AND = 0x50 BPF_B = 0x10 BPF_DIV = 0x30 + BPF_FS_MAGIC = 0xcafe4a11 BPF_H = 0x8 BPF_IMM = 0x0 BPF_IND = 0x40 @@ -229,6 +240,8 @@ const ( BS0 = 0x0 BS1 = 0x2000 BSDLY = 0x2000 + BTRFS_SUPER_MAGIC = 0x9123683e + BTRFS_TEST_MAGIC = 0x73727279 CAN_BCM = 0x2 CAN_EFF_FLAG = 0x80000000 CAN_EFF_ID_BITS = 0x1d @@ -252,6 +265,8 @@ const ( CBAUD = 0x100f CBAUDEX = 0x1000 CFLUSH = 0xf + CGROUP2_SUPER_MAGIC = 0x63677270 + CGROUP_SUPER_MAGIC = 0x27e0eb CIBAUD = 0x100f0000 CLOCAL = 0x800 CLOCK_BOOTTIME = 0x7 @@ -294,10 +309,12 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CMSPAR = 0x40000000 + CODA_SUPER_MAGIC = 0x73757245 CR0 = 0x0 CR1 = 0x200 CR2 = 0x400 CR3 = 0x600 + CRAMFS_MAGIC = 0x28cd3d45 
CRDLY = 0x600 CREAD = 0x80 CRTSCTS = 0x80000000 @@ -312,6 +329,9 @@ const ( CSTOP = 0x13 CSTOPB = 0x40 CSUSP = 0x1a + DAXFS_MAGIC = 0x64646178 + DEBUGFS_MAGIC = 0x64626720 + DEVPTS_SUPER_MAGIC = 0x1cd1 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -328,9 +348,12 @@ const ( ECHOKE = 0x800 ECHONL = 0x40 ECHOPRT = 0x400 + ECRYPTFS_SUPER_MAGIC = 0xf15f EFD_CLOEXEC = 0x80000 EFD_NONBLOCK = 0x800 EFD_SEMAPHORE = 0x1 + EFIVARFS_MAGIC = 0xde5e81e4 + EFS_SUPER_MAGIC = 0x414a53 ENCODING_DEFAULT = 0x0 ENCODING_FM_MARK = 0x3 ENCODING_FM_SPACE = 0x4 @@ -392,6 +415,7 @@ const ( ETH_P_ECONET = 0x18 ETH_P_EDSA = 0xdada ETH_P_ERSPAN = 0x88be + ETH_P_ERSPAN2 = 0x22eb ETH_P_FCOE = 0x8906 ETH_P_FIP = 0x8914 ETH_P_HDLC = 0x19 @@ -445,9 +469,14 @@ const ( ETH_P_WCCP = 0x883e ETH_P_X25 = 0x805 ETH_P_XDSA = 0xf8 + EXABYTE_ENABLE_NEST = 0xf0 + EXT2_SUPER_MAGIC = 0xef53 + EXT3_SUPER_MAGIC = 0xef53 + EXT4_SUPER_MAGIC = 0xef53 EXTA = 0xe EXTB = 0xf EXTPROC = 0x10000 + F2FS_SUPER_MAGIC = 0xf2f52010 FALLOC_FL_COLLAPSE_RANGE = 0x8 FALLOC_FL_INSERT_RANGE = 0x20 FALLOC_FL_KEEP_SIZE = 0x1 @@ -481,6 +510,7 @@ const ( FS_POLICY_FLAGS_PAD_8 = 0x1 FS_POLICY_FLAGS_PAD_MASK = 0x3 FS_POLICY_FLAGS_VALID = 0x3 + FUTEXFS_SUPER_MAGIC = 0xbad1dea F_ADD_SEALS = 0x409 F_DUPFD = 0x0 F_DUPFD_CLOEXEC = 0x406 @@ -542,6 +572,49 @@ const ( GENL_UNS_ADMIN_PERM = 0x10 GRND_NONBLOCK = 0x1 GRND_RANDOM = 0x2 + HDIO_DRIVE_CMD = 0x31f + HDIO_DRIVE_CMD_AEB = 0x31e + HDIO_DRIVE_CMD_HDR_SIZE = 0x4 + HDIO_DRIVE_HOB_HDR_SIZE = 0x8 + HDIO_DRIVE_RESET = 0x31c + HDIO_DRIVE_TASK = 0x31e + HDIO_DRIVE_TASKFILE = 0x31d + HDIO_DRIVE_TASK_HDR_SIZE = 0x8 + HDIO_GETGEO = 0x301 + HDIO_GET_32BIT = 0x309 + HDIO_GET_ACOUSTIC = 0x30f + HDIO_GET_ADDRESS = 0x310 + HDIO_GET_BUSSTATE = 0x31a + HDIO_GET_DMA = 0x30b + HDIO_GET_IDENTITY = 0x30d + HDIO_GET_KEEPSETTINGS = 0x308 + HDIO_GET_MULTCOUNT = 0x304 + HDIO_GET_NICE = 0x30c + HDIO_GET_NOWERR = 0x30a + HDIO_GET_QDMA = 0x305 + HDIO_GET_UNMASKINTR = 0x302 + HDIO_GET_WCACHE = 0x30e + HDIO_OBSOLETE_IDENTITY = 0x307 + HDIO_SCAN_HWIF = 0x328 + HDIO_SET_32BIT = 0x324 + HDIO_SET_ACOUSTIC = 0x32c + HDIO_SET_ADDRESS = 0x32f + HDIO_SET_BUSSTATE = 0x32d + HDIO_SET_DMA = 0x326 + HDIO_SET_KEEPSETTINGS = 0x323 + HDIO_SET_MULTCOUNT = 0x321 + HDIO_SET_NICE = 0x329 + HDIO_SET_NOWERR = 0x325 + HDIO_SET_PIO_MODE = 0x327 + HDIO_SET_QDMA = 0x32e + HDIO_SET_UNMASKINTR = 0x322 + HDIO_SET_WCACHE = 0x32b + HDIO_SET_XFER = 0x306 + HDIO_TRISTATE_HWIF = 0x31b + HDIO_UNREGISTER_HWIF = 0x32a + HOSTFS_SUPER_MAGIC = 0xc0ffee + HPFS_SUPER_MAGIC = 0xf995e849 + HUGETLBFS_MAGIC = 0x958458f6 HUPCL = 0x400 IBSHIFT = 0x10 ICANON = 0x2 @@ -792,12 +865,14 @@ const ( IP_UNICAST_IF = 0x32 IP_XFRM_POLICY = 0x11 ISIG = 0x1 + ISOFS_SUPER_MAGIC = 0x9660 ISTRIP = 0x20 IUCLC = 0x200 IUTF8 = 0x4000 IXANY = 0x800 IXOFF = 0x1000 IXON = 0x400 + JFFS2_SUPER_MAGIC = 0x72b6 KEYCTL_ASSUME_AUTHORITY = 0x10 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 @@ -893,9 +968,15 @@ const ( MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MCL_ONFAULT = 0x4 + MINIX2_SUPER_MAGIC = 0x2468 + MINIX2_SUPER_MAGIC2 = 0x2478 + MINIX3_SUPER_MAGIC = 0x4d5a + MINIX_SUPER_MAGIC = 0x137f + MINIX_SUPER_MAGIC2 = 0x138f MNT_DETACH = 0x2 MNT_EXPIRE = 0x4 MNT_FORCE = 0x1 + MSDOS_SUPER_MAGIC = 0x4d44 MSG_BATCH = 0x40000 MSG_CMSG_CLOEXEC = 0x40000000 MSG_CONFIRM = 0x800 @@ -955,7 +1036,9 @@ const ( MS_SYNCHRONOUS = 0x10 MS_UNBINDABLE = 0x20000 MS_VERBOSE = 0x8000 + MTD_INODE_FS_MAGIC = 0x11307854 NAME_MAX = 0xff + NCP_SUPER_MAGIC = 0x564c NETLINK_ADD_MEMBERSHIP = 0x1 NETLINK_AUDIT = 0x9 NETLINK_BROADCAST_ERROR = 0x4 @@ -990,6 +1073,37 @@ 
const ( NETLINK_UNUSED = 0x1 NETLINK_USERSOCK = 0x2 NETLINK_XFRM = 0x6 + NFNETLINK_V0 = 0x0 + NFNLGRP_ACCT_QUOTA = 0x8 + NFNLGRP_CONNTRACK_DESTROY = 0x3 + NFNLGRP_CONNTRACK_EXP_DESTROY = 0x6 + NFNLGRP_CONNTRACK_EXP_NEW = 0x4 + NFNLGRP_CONNTRACK_EXP_UPDATE = 0x5 + NFNLGRP_CONNTRACK_NEW = 0x1 + NFNLGRP_CONNTRACK_UPDATE = 0x2 + NFNLGRP_MAX = 0x9 + NFNLGRP_NFTABLES = 0x7 + NFNLGRP_NFTRACE = 0x9 + NFNLGRP_NONE = 0x0 + NFNL_BATCH_MAX = 0x1 + NFNL_MSG_BATCH_BEGIN = 0x10 + NFNL_MSG_BATCH_END = 0x11 + NFNL_NFA_NEST = 0x8000 + NFNL_SUBSYS_ACCT = 0x7 + NFNL_SUBSYS_COUNT = 0xc + NFNL_SUBSYS_CTHELPER = 0x9 + NFNL_SUBSYS_CTNETLINK = 0x1 + NFNL_SUBSYS_CTNETLINK_EXP = 0x2 + NFNL_SUBSYS_CTNETLINK_TIMEOUT = 0x8 + NFNL_SUBSYS_IPSET = 0x6 + NFNL_SUBSYS_NFTABLES = 0xa + NFNL_SUBSYS_NFT_COMPAT = 0xb + NFNL_SUBSYS_NONE = 0x0 + NFNL_SUBSYS_OSF = 0x5 + NFNL_SUBSYS_QUEUE = 0x3 + NFNL_SUBSYS_ULOG = 0x4 + NFS_SUPER_MAGIC = 0x6969 + NILFS_SUPER_MAGIC = 0x3434 NL0 = 0x0 NL1 = 0x100 NLA_ALIGNTO = 0x4 @@ -1022,6 +1136,8 @@ const ( NLM_F_REQUEST = 0x1 NLM_F_ROOT = 0x100 NOFLSH = 0x80 + NSFS_MAGIC = 0x6e736673 + OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 OFILL = 0x40 @@ -1029,7 +1145,9 @@ const ( ONLCR = 0x4 ONLRET = 0x20 ONOCR = 0x10 + OPENPROM_SUPER_MAGIC = 0x9fa1 OPOST = 0x1 + OVERLAYFS_SUPER_MAGIC = 0x794c7630 O_ACCMODE = 0x3 O_APPEND = 0x400 O_ASYNC = 0x2000 @@ -1116,14 +1234,17 @@ const ( PERF_EVENT_IOC_ID = 0x80082407 PERF_EVENT_IOC_PAUSE_OUTPUT = 0x40042409 PERF_EVENT_IOC_PERIOD = 0x40082404 + PERF_EVENT_IOC_QUERY_BPF = 0xc008240a PERF_EVENT_IOC_REFRESH = 0x2402 PERF_EVENT_IOC_RESET = 0x2403 PERF_EVENT_IOC_SET_BPF = 0x40042408 PERF_EVENT_IOC_SET_FILTER = 0x40082406 PERF_EVENT_IOC_SET_OUTPUT = 0x2405 + PIPEFS_MAGIC = 0x50495045 PRIO_PGRP = 0x1 PRIO_PROCESS = 0x0 PRIO_USER = 0x2 + PROC_SUPER_MAGIC = 0x9fa0 PROT_EXEC = 0x4 PROT_GROWSDOWN = 0x1000000 PROT_GROWSUP = 0x2000000 @@ -1229,6 +1350,7 @@ const ( PR_TSC_SIGSEGV = 0x2 PR_UNALIGN_NOPRINT = 0x1 PR_UNALIGN_SIGBUS = 0x2 + PSTOREFS_MAGIC = 0x6165676c PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 @@ -1281,6 +1403,7 @@ const ( PTRACE_POKE_SYSTEM_CALL = 0x5008 PTRACE_PROT = 0x15 PTRACE_SECCOMP_GET_FILTER = 0x420c + PTRACE_SECCOMP_GET_METADATA = 0x420d PTRACE_SEIZE = 0x4206 PTRACE_SETOPTIONS = 0x4200 PTRACE_SETREGS = 0xd @@ -1350,6 +1473,11 @@ const ( PT_ORIGGPR2 = 0xd0 PT_PSWADDR = 0x8 PT_PSWMASK = 0x0 + QNX4_SUPER_MAGIC = 0x2f + QNX6_SUPER_MAGIC = 0x68191122 + RAMFS_MAGIC = 0x858458f6 + RDTGROUP_SUPER_MAGIC = 0x7655821 + REISERFS_SUPER_MAGIC = 0x52654973 RLIMIT_AS = 0x9 RLIMIT_CORE = 0x4 RLIMIT_CPU = 0x0 @@ -1535,6 +1663,8 @@ const ( SECCOMP_MODE_DISABLED = 0x0 SECCOMP_MODE_FILTER = 0x2 SECCOMP_MODE_STRICT = 0x1 + SECURITYFS_MAGIC = 0x73636673 + SELINUX_MAGIC = 0xf97cff8c SHUT_RD = 0x0 SHUT_RDWR = 0x2 SHUT_WR = 0x1 @@ -1619,6 +1749,23 @@ const ( SIOCSPGRP = 0x8902 SIOCSRARP = 0x8962 SIOCWANDEV = 0x894a + SMACK_MAGIC = 0x43415d53 + SMART_AUTOSAVE = 0xd2 + SMART_AUTO_OFFLINE = 0xdb + SMART_DISABLE = 0xd9 + SMART_ENABLE = 0xd8 + SMART_HCYL_PASS = 0xc2 + SMART_IMMEDIATE_OFFLINE = 0xd4 + SMART_LCYL_PASS = 0x4f + SMART_READ_LOG_SECTOR = 0xd5 + SMART_READ_THRESHOLDS = 0xd1 + SMART_READ_VALUES = 0xd0 + SMART_SAVE = 0xd3 + SMART_STATUS = 0xda + SMART_WRITE_LOG_SECTOR = 0xd6 + SMART_WRITE_THRESHOLDS = 0xd7 + SMB_SUPER_MAGIC = 0x517b + SOCKFS_MAGIC = 0x534f434b SOCK_CLOEXEC = 0x80000 SOCK_DCCP = 0x6 SOCK_DGRAM = 0x2 @@ -1729,6 +1876,8 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + SQUASHFS_MAGIC = 0x73717368 + 
STACK_END_MAGIC = 0x57ac6e9d STATX_ALL = 0xfff STATX_ATIME = 0x20 STATX_ATTR_APPEND = 0x20 @@ -1750,6 +1899,7 @@ const ( STATX_TYPE = 0x1 STATX_UID = 0x8 STATX__RESERVED = 0x80000000 + SYSFS_MAGIC = 0x62656572 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 @@ -1931,7 +2081,27 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 + TPACKET_ALIGNMENT = 0x10 + TPACKET_HDRLEN = 0x34 + TP_STATUS_AVAILABLE = 0x0 + TP_STATUS_BLK_TMO = 0x20 + TP_STATUS_COPY = 0x2 + TP_STATUS_CSUMNOTREADY = 0x8 + TP_STATUS_CSUM_VALID = 0x80 + TP_STATUS_KERNEL = 0x0 + TP_STATUS_LOSING = 0x4 + TP_STATUS_SENDING = 0x2 + TP_STATUS_SEND_REQUEST = 0x1 + TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_SOFTWARE = 0x20000000 + TP_STATUS_TS_SYS_HARDWARE = 0x40000000 + TP_STATUS_USER = 0x1 + TP_STATUS_VLAN_TPID_VALID = 0x40 + TP_STATUS_VLAN_VALID = 0x10 + TP_STATUS_WRONG_FORMAT = 0x4 + TRACEFS_MAGIC = 0x74726163 TS_COMM_LEN = 0x20 TUNATTACHFILTER = 0x401054d5 TUNDETACHFILTER = 0x401054d6 @@ -1943,6 +2113,7 @@ const ( TUNGETVNETHDRSZ = 0x800454d7 TUNGETVNETLE = 0x800454dd TUNSETDEBUG = 0x400454c9 + TUNSETFILTEREBPF = 0x800454e1 TUNSETGROUP = 0x400454ce TUNSETIFF = 0x400454ca TUNSETIFINDEX = 0x400454da @@ -1953,13 +2124,17 @@ const ( TUNSETPERSIST = 0x400454cb TUNSETQUEUE = 0x400454d9 TUNSETSNDBUF = 0x400454d4 + TUNSETSTEERINGEBPF = 0x800454e0 TUNSETTXFILTER = 0x400454d1 TUNSETVNETBE = 0x400454de TUNSETVNETHDRSZ = 0x400454d8 TUNSETVNETLE = 0x400454dc + UDF_SUPER_MAGIC = 0x15013346 UMOUNT_NOFOLLOW = 0x8 + USBDEVICE_SUPER_MAGIC = 0x9fa2 UTIME_NOW = 0x3fffffff UTIME_OMIT = 0x3ffffffe + V9FS_MAGIC = 0x1021997 VDISCARD = 0xd VEOF = 0x4 VEOL = 0xb @@ -2001,6 +2176,86 @@ const ( WDIOC_SETPRETIMEOUT = 0xc0045708 WDIOC_SETTIMEOUT = 0xc0045706 WEXITED = 0x4 + WIN_ACKMEDIACHANGE = 0xdb + WIN_CHECKPOWERMODE1 = 0xe5 + WIN_CHECKPOWERMODE2 = 0x98 + WIN_DEVICE_RESET = 0x8 + WIN_DIAGNOSE = 0x90 + WIN_DOORLOCK = 0xde + WIN_DOORUNLOCK = 0xdf + WIN_DOWNLOAD_MICROCODE = 0x92 + WIN_FLUSH_CACHE = 0xe7 + WIN_FLUSH_CACHE_EXT = 0xea + WIN_FORMAT = 0x50 + WIN_GETMEDIASTATUS = 0xda + WIN_IDENTIFY = 0xec + WIN_IDENTIFY_DMA = 0xee + WIN_IDLEIMMEDIATE = 0xe1 + WIN_INIT = 0x60 + WIN_MEDIAEJECT = 0xed + WIN_MULTREAD = 0xc4 + WIN_MULTREAD_EXT = 0x29 + WIN_MULTWRITE = 0xc5 + WIN_MULTWRITE_EXT = 0x39 + WIN_NOP = 0x0 + WIN_PACKETCMD = 0xa0 + WIN_PIDENTIFY = 0xa1 + WIN_POSTBOOT = 0xdc + WIN_PREBOOT = 0xdd + WIN_QUEUED_SERVICE = 0xa2 + WIN_READ = 0x20 + WIN_READDMA = 0xc8 + WIN_READDMA_EXT = 0x25 + WIN_READDMA_ONCE = 0xc9 + WIN_READDMA_QUEUED = 0xc7 + WIN_READDMA_QUEUED_EXT = 0x26 + WIN_READ_BUFFER = 0xe4 + WIN_READ_EXT = 0x24 + WIN_READ_LONG = 0x22 + WIN_READ_LONG_ONCE = 0x23 + WIN_READ_NATIVE_MAX = 0xf8 + WIN_READ_NATIVE_MAX_EXT = 0x27 + WIN_READ_ONCE = 0x21 + WIN_RECAL = 0x10 + WIN_RESTORE = 0x10 + WIN_SECURITY_DISABLE = 0xf6 + WIN_SECURITY_ERASE_PREPARE = 0xf3 + WIN_SECURITY_ERASE_UNIT = 0xf4 + WIN_SECURITY_FREEZE_LOCK = 0xf5 + WIN_SECURITY_SET_PASS = 0xf1 + WIN_SECURITY_UNLOCK = 0xf2 + WIN_SEEK = 0x70 + WIN_SETFEATURES = 0xef + WIN_SETIDLE1 = 0xe3 + WIN_SETIDLE2 = 0x97 + WIN_SETMULT = 0xc6 + WIN_SET_MAX = 0xf9 + WIN_SET_MAX_EXT = 0x37 + WIN_SLEEPNOW1 = 0xe6 + WIN_SLEEPNOW2 = 0x99 + WIN_SMART = 0xb0 + WIN_SPECIFY = 0x91 + WIN_SRST = 0x8 + WIN_STANDBY = 0xe2 + WIN_STANDBY2 = 0x96 + WIN_STANDBYNOW1 = 0xe0 + WIN_STANDBYNOW2 = 0x94 + WIN_VERIFY = 0x40 + WIN_VERIFY_EXT = 0x42 + WIN_VERIFY_ONCE = 0x41 + WIN_WRITE = 0x30 + WIN_WRITEDMA = 0xca + WIN_WRITEDMA_EXT = 0x35 + WIN_WRITEDMA_ONCE = 0xcb + 
WIN_WRITEDMA_QUEUED = 0xcc + WIN_WRITEDMA_QUEUED_EXT = 0x36 + WIN_WRITE_BUFFER = 0xe8 + WIN_WRITE_EXT = 0x34 + WIN_WRITE_LONG = 0x32 + WIN_WRITE_LONG_ONCE = 0x33 + WIN_WRITE_ONCE = 0x31 + WIN_WRITE_SAME = 0xe9 + WIN_WRITE_VERIFY = 0x3c WNOHANG = 0x1 WNOTHREAD = 0x20000000 WNOWAIT = 0x1000000 @@ -2010,7 +2265,9 @@ const ( XATTR_CREATE = 0x1 XATTR_REPLACE = 0x2 XCASE = 0x4 + XENFS_SUPER_MAGIC = 0xabba1974 XTABS = 0x1800 + ZSMALLOC_MAGIC = 0x58295829 ) // Errors @@ -2190,171 +2447,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "no such device or address", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device or resource busy", - 17: "file exists", - 18: "invalid cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "numerical result out of range", - 35: "resource deadlock avoided", - 36: "file name too long", - 37: "no locks available", - 38: "function not implemented", - 39: "directory not empty", - 40: "too many levels of symbolic links", - 42: "no message of desired type", - 43: "identifier removed", - 44: "channel number out of range", - 45: "level 2 not synchronized", - 46: "level 3 halted", - 47: "level 3 reset", - 48: "link number out of range", - 49: "protocol driver not attached", - 50: "no CSI structure available", - 51: "level 2 halted", - 52: "invalid exchange", - 53: "invalid request descriptor", - 54: "exchange full", - 55: "no anode", - 56: "invalid request code", - 57: "invalid slot", - 59: "bad font file format", - 60: "device not a stream", - 61: "no data available", - 62: "timer expired", - 63: "out of streams resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "multihop attempted", - 73: "RFS specific error", - 74: "bad message", - 75: "value too large for defined data type", - 76: "name not unique on network", - 77: "file descriptor in bad state", - 78: "remote address changed", - 79: "can not access a needed shared library", - 80: "accessing a corrupted shared library", - 81: ".lib section in a.out corrupted", - 82: "attempting to link in too many shared libraries", - 83: "cannot exec a shared library directly", - 84: "invalid or incomplete multibyte or wide character", - 85: "interrupted system call should be restarted", - 86: "streams pipe error", - 87: "too many users", - 88: "socket operation on non-socket", - 89: "destination address required", - 90: "message too long", - 91: "protocol wrong type for socket", - 92: "protocol not available", - 93: "protocol not supported", - 94: "socket type not supported", - 95: "operation not supported", - 96: "protocol family not supported", - 97: "address 
family not supported by protocol", - 98: "address already in use", - 99: "cannot assign requested address", - 100: "network is down", - 101: "network is unreachable", - 102: "network dropped connection on reset", - 103: "software caused connection abort", - 104: "connection reset by peer", - 105: "no buffer space available", - 106: "transport endpoint is already connected", - 107: "transport endpoint is not connected", - 108: "cannot send after transport endpoint shutdown", - 109: "too many references: cannot splice", - 110: "connection timed out", - 111: "connection refused", - 112: "host is down", - 113: "no route to host", - 114: "operation already in progress", - 115: "operation now in progress", - 116: "stale file handle", - 117: "structure needs cleaning", - 118: "not a XENIX named type file", - 119: "no XENIX semaphores available", - 120: "is a named type file", - 121: "remote I/O error", - 122: "disk quota exceeded", - 123: "no medium found", - 124: "wrong medium type", - 125: "operation canceled", - 126: "required key not available", - 127: "key has expired", - 128: "key has been revoked", - 129: "key was rejected by service", - 130: "owner died", - 131: "state not recoverable", - 132: "operation not possible due to RF-kill", - 133: "memory page has hardware error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device or resource busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "invalid cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "numerical result out of range"}, + {35, "EDEADLK", "resource deadlock avoided"}, + {36, "ENAMETOOLONG", "file name too long"}, + {37, "ENOLCK", "no locks available"}, + {38, "ENOSYS", "function not implemented"}, + {39, "ENOTEMPTY", "directory not empty"}, + {40, "ELOOP", "too many levels of symbolic links"}, + {42, "ENOMSG", "no message of desired type"}, + {43, "EIDRM", "identifier removed"}, + {44, "ECHRNG", "channel number out of range"}, + {45, "EL2NSYNC", "level 2 not synchronized"}, + {46, "EL3HLT", "level 3 halted"}, + {47, "EL3RST", "level 3 reset"}, + {48, "ELNRNG", "link number out of range"}, + {49, "EUNATCH", "protocol driver not attached"}, + {50, "ENOCSI", "no CSI structure available"}, + {51, "EL2HLT", "level 2 halted"}, + {52, "EBADE", "invalid exchange"}, + {53, "EBADR", 
"invalid request descriptor"}, + {54, "EXFULL", "exchange full"}, + {55, "ENOANO", "no anode"}, + {56, "EBADRQC", "invalid request code"}, + {57, "EBADSLT", "invalid slot"}, + {59, "EBFONT", "bad font file format"}, + {60, "ENOSTR", "device not a stream"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of streams resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, + {71, "EPROTO", "protocol error"}, + {72, "EMULTIHOP", "multihop attempted"}, + {73, "EDOTDOT", "RFS specific error"}, + {74, "EBADMSG", "bad message"}, + {75, "EOVERFLOW", "value too large for defined data type"}, + {76, "ENOTUNIQ", "name not unique on network"}, + {77, "EBADFD", "file descriptor in bad state"}, + {78, "EREMCHG", "remote address changed"}, + {79, "ELIBACC", "can not access a needed shared library"}, + {80, "ELIBBAD", "accessing a corrupted shared library"}, + {81, "ELIBSCN", ".lib section in a.out corrupted"}, + {82, "ELIBMAX", "attempting to link in too many shared libraries"}, + {83, "ELIBEXEC", "cannot exec a shared library directly"}, + {84, "EILSEQ", "invalid or incomplete multibyte or wide character"}, + {85, "ERESTART", "interrupted system call should be restarted"}, + {86, "ESTRPIPE", "streams pipe error"}, + {87, "EUSERS", "too many users"}, + {88, "ENOTSOCK", "socket operation on non-socket"}, + {89, "EDESTADDRREQ", "destination address required"}, + {90, "EMSGSIZE", "message too long"}, + {91, "EPROTOTYPE", "protocol wrong type for socket"}, + {92, "ENOPROTOOPT", "protocol not available"}, + {93, "EPROTONOSUPPORT", "protocol not supported"}, + {94, "ESOCKTNOSUPPORT", "socket type not supported"}, + {95, "ENOTSUP", "operation not supported"}, + {96, "EPFNOSUPPORT", "protocol family not supported"}, + {97, "EAFNOSUPPORT", "address family not supported by protocol"}, + {98, "EADDRINUSE", "address already in use"}, + {99, "EADDRNOTAVAIL", "cannot assign requested address"}, + {100, "ENETDOWN", "network is down"}, + {101, "ENETUNREACH", "network is unreachable"}, + {102, "ENETRESET", "network dropped connection on reset"}, + {103, "ECONNABORTED", "software caused connection abort"}, + {104, "ECONNRESET", "connection reset by peer"}, + {105, "ENOBUFS", "no buffer space available"}, + {106, "EISCONN", "transport endpoint is already connected"}, + {107, "ENOTCONN", "transport endpoint is not connected"}, + {108, "ESHUTDOWN", "cannot send after transport endpoint shutdown"}, + {109, "ETOOMANYREFS", "too many references: cannot splice"}, + {110, "ETIMEDOUT", "connection timed out"}, + {111, "ECONNREFUSED", "connection refused"}, + {112, "EHOSTDOWN", "host is down"}, + {113, "EHOSTUNREACH", "no route to host"}, + {114, "EALREADY", "operation already in progress"}, + {115, "EINPROGRESS", "operation now in progress"}, + {116, "ESTALE", "stale file handle"}, + {117, "EUCLEAN", "structure needs cleaning"}, + {118, "ENOTNAM", "not a XENIX named type file"}, + {119, "ENAVAIL", "no XENIX semaphores available"}, + {120, "EISNAM", "is a named type file"}, + {121, "EREMOTEIO", "remote I/O error"}, + {122, "EDQUOT", "disk quota exceeded"}, + {123, "ENOMEDIUM", "no medium found"}, + {124, "EMEDIUMTYPE", "wrong medium type"}, + {125, "ECANCELED", "operation canceled"}, + {126, "ENOKEY", "required key not available"}, + {127, 
"EKEYEXPIRED", "key has expired"}, + {128, "EKEYREVOKED", "key has been revoked"}, + {129, "EKEYREJECTED", "key was rejected by service"}, + {130, "EOWNERDEAD", "owner died"}, + {131, "ENOTRECOVERABLE", "state not recoverable"}, + {132, "ERFKILL", "operation not possible due to RF-kill"}, + {133, "EHWPOISON", "memory page has hardware error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/breakpoint trap", - 6: "aborted", - 7: "bus error", - 8: "floating point exception", - 9: "killed", - 10: "user defined signal 1", - 11: "segmentation fault", - 12: "user defined signal 2", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "stack fault", - 17: "child exited", - 18: "continued", - 19: "stopped (signal)", - 20: "stopped", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "urgent I/O condition", - 24: "CPU time limit exceeded", - 25: "file size limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window changed", - 29: "I/O possible", - 30: "power failure", - 31: "bad system call", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/breakpoint trap"}, + {6, "SIGABRT", "aborted"}, + {7, "SIGBUS", "bus error"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGUSR1", "user defined signal 1"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGUSR2", "user defined signal 2"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGSTKFLT", "stack fault"}, + {17, "SIGCHLD", "child exited"}, + {18, "SIGCONT", "continued"}, + {19, "SIGSTOP", "stopped (signal)"}, + {20, "SIGTSTP", "stopped"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGURG", "urgent I/O condition"}, + {24, "SIGXCPU", "CPU time limit exceeded"}, + {25, "SIGXFSZ", "file size limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window changed"}, + {29, "SIGIO", "I/O possible"}, + {30, "SIGPWR", "power failure"}, + {31, "SIGSYS", "bad system call"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 1612b660916..cd93ce0d85e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -159,6 +159,7 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -1583,137 +1584,145 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many 
open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large or too small", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol option not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "illegal byte sequence", - 86: "not supported", - 87: "operation Canceled", - 88: "bad or Corrupt message", - 89: "no message available", - 90: "no STREAM resources", - 91: "not a STREAM", - 92: "STREAM ioctl timeout", - 93: "attribute not found", - 94: "multihop attempted", - 95: "link has been severed", - 96: "protocol error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, 
+ {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large or too small"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol option not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "connection timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. 
not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EILSEQ", "illegal byte sequence"}, + {86, "ENOTSUP", "not supported"}, + {87, "ECANCELED", "operation Canceled"}, + {88, "EBADMSG", "bad or Corrupt message"}, + {89, "ENODATA", "no message available"}, + {90, "ENOSR", "no STREAM resources"}, + {91, "ENOSTR", "not a STREAM"}, + {92, "ETIME", "STREAM ioctl timeout"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EMULTIHOP", "multihop attempted"}, + {95, "ENOLINK", "link has been severed"}, + {96, "ELAST", "protocol error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "power fail/restart", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGPWR", "power fail/restart"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index c994ab610c0..071701c4118 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -159,6 +159,7 @@ const ( CLONE_VFORK = 0x4000 CLONE_VM = 0x100 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -1573,137 +1574,145 @@ const ( ) // Error 
table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large or too small", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol option not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "illegal byte sequence", - 86: "not supported", - 87: "operation Canceled", - 88: "bad or Corrupt message", - 89: "no message available", - 90: "no STREAM resources", - 91: "not a STREAM", - 92: "STREAM ioctl timeout", - 93: "attribute not found", - 94: "multihop attempted", - 95: "link has been severed", - 96: "protocol error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large or too small"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol option not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "connection timed out"}, + 
{61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EILSEQ", "illegal byte sequence"}, + {86, "ENOTSUP", "not supported"}, + {87, "ECANCELED", "operation Canceled"}, + {88, "EBADMSG", "bad or Corrupt message"}, + {89, "ENODATA", "no message available"}, + {90, "ENOSR", "no STREAM resources"}, + {91, "ENOSTR", "not a STREAM"}, + {92, "ETIME", "STREAM ioctl timeout"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EMULTIHOP", "multihop attempted"}, + {95, "ENOLINK", "link has been severed"}, + {96, "ELAST", "protocol error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "power fail/restart", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", 
"information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGPWR", "power fail/restart"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index a8f9efedec5..5fe56ae8c72 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -151,6 +151,7 @@ const ( CFLUSH = 0xf CLOCAL = 0x8000 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -1562,137 +1563,145 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large or too small", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol option not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "identifier removed", - 83: "no message of desired type", - 84: "value too large to be stored in data type", - 85: "illegal byte sequence", - 86: "not supported", - 87: "operation Canceled", - 88: "bad or Corrupt message", - 89: "no message available", - 90: "no STREAM resources", - 91: "not a STREAM", - 92: "STREAM ioctl timeout", - 93: "attribute not found", - 94: "multihop attempted", - 95: "link has been severed", - 96: "protocol error", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large or too small"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol option not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "connection timed out"}, + 
{61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disc quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC prog. not avail"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIDRM", "identifier removed"}, + {83, "ENOMSG", "no message of desired type"}, + {84, "EOVERFLOW", "value too large to be stored in data type"}, + {85, "EILSEQ", "illegal byte sequence"}, + {86, "ENOTSUP", "not supported"}, + {87, "ECANCELED", "operation Canceled"}, + {88, "EBADMSG", "bad or Corrupt message"}, + {89, "ENODATA", "no message available"}, + {90, "ENOSR", "no STREAM resources"}, + {91, "ENOSTR", "not a STREAM"}, + {92, "ETIME", "STREAM ioctl timeout"}, + {93, "ENOATTR", "attribute not found"}, + {94, "EMULTIHOP", "multihop attempted"}, + {95, "ENOLINK", "link has been severed"}, + {96, "ELAST", "protocol error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "power fail/restart", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGIOT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "stopped (signal)"}, + {18, "SIGTSTP", "stopped"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", 
"information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGPWR", "power fail/restart"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index 04e4f33198d..0a1c3e7e8c3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -147,6 +147,7 @@ const ( CFLUSH = 0xf CLOCAL = 0x8000 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -1460,132 +1461,140 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "IPsec processing failure", - 83: "attribute not found", - 84: "illegal byte sequence", - 85: "no medium found", - 86: "wrong medium type", - 87: "value too large to be stored in data type", - 88: "operation canceled", - 89: "identifier removed", - 90: "no message of desired type", - 91: "not supported", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, 
"EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ELAST", "not supported"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread AST", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index c80ff981200..acfc6646919 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -45,6 +45,7 @@ const ( AF_SNA = 0xb AF_UNIX = 0x1 AF_UNSPEC = 0x0 + ALTWERASE = 0x200 ARPHRD_ETHER = 0x1 ARPHRD_FRELAY = 0xf ARPHRD_IEEE1394 = 0x18 @@ -146,7 +147,14 @@ const ( BRKINT = 0x2 CFLUSH = 0xf CLOCAL = 0x8000 + CLOCK_BOOTTIME = 0x6 + CLOCK_MONOTONIC = 0x3 + CLOCK_PROCESS_CPUTIME_ID = 0x2 + CLOCK_REALTIME = 0x0 + CLOCK_THREAD_CPUTIME_ID = 0x4 + CLOCK_UPTIME = 0x5 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -177,6 +185,7 @@ const ( DLT_LOOP = 0xc DLT_MPLS = 0xdb DLT_NULL = 0x0 + DLT_OPENFLOW = 0x10b DLT_PFLOG = 0x75 DLT_PFSYNC = 0x12 DLT_PPP = 0x9 @@ -187,6 +196,23 @@ const ( DLT_RAW = 0xe DLT_SLIP = 0x8 DLT_SLIP_BSDOS = 0xf + DLT_USBPCAP = 0xf9 + DLT_USER0 = 0x93 + DLT_USER1 = 0x94 + DLT_USER10 = 0x9d + DLT_USER11 = 0x9e + DLT_USER12 = 0x9f + DLT_USER13 = 0xa0 + DLT_USER14 = 0xa1 + DLT_USER15 = 0xa2 + DLT_USER2 = 0x95 + DLT_USER3 = 0x96 + DLT_USER4 = 0x97 + DLT_USER5 = 0x98 + DLT_USER6 = 0x99 + DLT_USER7 = 0x9a + DLT_USER8 = 0x9b + DLT_USER9 = 0x9c DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -400,27 +426,38 @@ const ( ETHER_CRC_POLY_LE = 0xedb88320 ETHER_HDR_LEN = 0xe ETHER_MAX_DIX_LEN = 0x600 + ETHER_MAX_HARDMTU_LEN = 0xff9b ETHER_MAX_LEN = 0x5ee ETHER_MIN_LEN = 0x40 ETHER_TYPE_LEN = 0x2 ETHER_VLAN_ENCAP_LEN = 0x4 EVFILT_AIO = -0x3 + EVFILT_DEVICE = -0x8 EVFILT_PROC = -0x5 EVFILT_READ = -0x1 EVFILT_SIGNAL = -0x6 - EVFILT_SYSCOUNT = 0x7 + EVFILT_SYSCOUNT = 0x8 EVFILT_TIMER = -0x7 EVFILT_VNODE = -0x4 EVFILT_WRITE = -0x2 + EVL_ENCAPLEN = 0x4 + EVL_PRIO_BITS = 0xd + EVL_PRIO_MAX = 0x7 + EVL_VLID_MASK = 0xfff + EVL_VLID_MAX = 0xffe + EVL_VLID_MIN = 0x1 + EVL_VLID_NULL = 0x0 EV_ADD = 0x1 EV_CLEAR = 0x20 EV_DELETE = 0x2 EV_DISABLE = 0x8 + EV_DISPATCH = 0x80 EV_ENABLE = 0x4 EV_EOF = 0x8000 EV_ERROR = 0x4000 EV_FLAG1 = 0x2000 EV_ONESHOT = 0x10 + EV_RECEIPT = 0x40 EV_SYSFLAGS = 0xf000 EXTA = 0x4b00 EXTB = 0x9600 @@ -434,7 +471,7 @@ const ( F_GETFL = 0x3 F_GETLK = 0x7 F_GETOWN = 0x5 - F_OK = 0x0 + F_ISATTY = 0xb F_RDLCK = 0x1 F_SETFD = 0x2 F_SETFL = 0x4 @@ -451,7 +488,6 @@ const ( IEXTEN = 0x400 IFAN_ARRIVAL = 0x0 IFAN_DEPARTURE = 0x1 - IFA_ROUTE = 0x1 IFF_ALLMULTI = 0x200 IFF_BROADCAST = 0x2 IFF_CANTCHANGE = 0x8e52 @@ -462,12 +498,12 @@ const ( IFF_LOOPBACK = 0x8 IFF_MULTICAST = 0x8000 IFF_NOARP = 0x80 - IFF_NOTRAILERS = 0x20 IFF_OACTIVE = 0x400 IFF_POINTOPOINT = 0x10 IFF_PROMISC = 0x100 IFF_RUNNING = 0x40 IFF_SIMPLEX = 0x800 + IFF_STATICARP = 0x20 IFF_UP = 0x1 IFNAMSIZ = 0x10 IFT_1822 = 0x2 @@ -596,6 +632,7 @@ const ( IFT_LINEGROUP = 0xd2 IFT_LOCALTALK = 0x2a IFT_LOOP = 0x18 + IFT_MBIM = 0xfa IFT_MEDIAMAILOVERIP = 0x8b IFT_MFSIGLINK = 0xa7 IFT_MIOX25 = 0x26 @@ -720,8 +757,6 @@ const ( IPPROTO_AH = 0x33 IPPROTO_CARP = 0x70 IPPROTO_DIVERT = 0x102 - IPPROTO_DIVERT_INIT = 0x2 - IPPROTO_DIVERT_RESP = 0x1 IPPROTO_DONE = 0x101 IPPROTO_DSTOPTS = 0x3c IPPROTO_EGP = 0x8 @@ -778,6 +813,7 @@ const ( IPV6_LEAVE_GROUP = 0xd IPV6_MAXHLIM = 0xff IPV6_MAXPACKET = 0xffff + IPV6_MINHOPCOUNT = 0x41 IPV6_MMTU = 0x500 IPV6_MULTICAST_HOPS = 0xa IPV6_MULTICAST_IF = 0x9 @@ -817,12 +853,12 @@ const ( IP_DEFAULT_MULTICAST_LOOP = 0x1 IP_DEFAULT_MULTICAST_TTL = 0x1 IP_DF = 0x4000 - IP_DIVERTFL = 0x1022 IP_DROP_MEMBERSHIP = 0xd IP_ESP_NETWORK_LEVEL = 0x16 IP_ESP_TRANS_LEVEL = 0x15 IP_HDRINCL = 0x2 IP_IPCOMP_LEVEL = 0x1d + IP_IPDEFTTL = 0x25 IP_IPSECFLOWINFO = 0x24 IP_IPSEC_LOCAL_AUTH = 0x1b IP_IPSEC_LOCAL_CRED = 0x19 @@ -856,10 +892,12 @@ const ( IP_RETOPTS = 0x8 IP_RF = 
0x8000 IP_RTABLE = 0x1021 + IP_SENDSRCADDR = 0x7 IP_TOS = 0x3 IP_TTL = 0x4 ISIG = 0x80 ISTRIP = 0x20 + IUCLC = 0x1000 IXANY = 0x800 IXOFF = 0x400 IXON = 0x200 @@ -880,25 +918,28 @@ const ( MADV_SPACEAVAIL = 0x5 MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 - MAP_COPY = 0x4 + MAP_ANONYMOUS = 0x1000 + MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x1ff7 - MAP_HASSEMAPHORE = 0x200 - MAP_INHERIT = 0x80 + MAP_FLAGMASK = 0x7ff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 - MAP_INHERIT_DONATE_COPY = 0x3 MAP_INHERIT_NONE = 0x2 MAP_INHERIT_SHARE = 0x0 - MAP_NOEXTEND = 0x100 - MAP_NORESERVE = 0x40 + MAP_INHERIT_ZERO = 0x3 + MAP_NOEXTEND = 0x0 + MAP_NORESERVE = 0x0 MAP_PRIVATE = 0x2 - MAP_RENAME = 0x20 + MAP_RENAME = 0x0 MAP_SHARED = 0x1 - MAP_TRYFIXED = 0x400 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MSG_BCAST = 0x100 + MSG_CMSG_CLOEXEC = 0x800 MSG_CTRUNC = 0x20 MSG_DONTROUTE = 0x4 MSG_DONTWAIT = 0x80 @@ -916,11 +957,14 @@ const ( NET_RT_DUMP = 0x1 NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 - NET_RT_MAXID = 0x6 + NET_RT_IFNAMES = 0x6 + NET_RT_MAXID = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 NOFLSH = 0x80000000 + NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 + NOTE_CHANGE = 0x1 NOTE_CHILD = 0x4 NOTE_DELETE = 0x1 NOTE_EOF = 0x2 @@ -939,11 +983,13 @@ const ( NOTE_TRUNCATE = 0x80 NOTE_WRITE = 0x2 OCRNL = 0x10 + OLCUC = 0x20 ONLCR = 0x2 ONLRET = 0x80 ONOCR = 0x40 ONOEOT = 0x8 OPOST = 0x1 + OXTABS = 0x4 O_ACCMODE = 0x3 O_APPEND = 0x8 O_ASYNC = 0x40 @@ -981,23 +1027,32 @@ const ( RLIMIT_CPU = 0x0 RLIMIT_DATA = 0x2 RLIMIT_FSIZE = 0x1 + RLIMIT_MEMLOCK = 0x6 RLIMIT_NOFILE = 0x8 + RLIMIT_NPROC = 0x7 + RLIMIT_RSS = 0x5 RLIMIT_STACK = 0x3 RLIM_INFINITY = 0x7fffffffffffffff RTAX_AUTHOR = 0x6 + RTAX_BFD = 0xb RTAX_BRD = 0x7 + RTAX_DNS = 0xc RTAX_DST = 0x0 RTAX_GATEWAY = 0x1 RTAX_GENMASK = 0x3 RTAX_IFA = 0x5 RTAX_IFP = 0x4 RTAX_LABEL = 0xa - RTAX_MAX = 0xb + RTAX_MAX = 0xf RTAX_NETMASK = 0x2 + RTAX_SEARCH = 0xe RTAX_SRC = 0x8 RTAX_SRCMASK = 0x9 + RTAX_STATIC = 0xd RTA_AUTHOR = 0x40 + RTA_BFD = 0x800 RTA_BRD = 0x80 + RTA_DNS = 0x1000 RTA_DST = 0x1 RTA_GATEWAY = 0x2 RTA_GENMASK = 0x8 @@ -1005,34 +1060,39 @@ const ( RTA_IFP = 0x10 RTA_LABEL = 0x400 RTA_NETMASK = 0x4 + RTA_SEARCH = 0x4000 RTA_SRC = 0x100 RTA_SRCMASK = 0x200 + RTA_STATIC = 0x2000 RTF_ANNOUNCE = 0x4000 + RTF_BFD = 0x1000000 RTF_BLACKHOLE = 0x1000 + RTF_BROADCAST = 0x400000 + RTF_CACHED = 0x20000 RTF_CLONED = 0x10000 RTF_CLONING = 0x100 + RTF_CONNECTED = 0x800000 RTF_DONE = 0x40 RTF_DYNAMIC = 0x10 - RTF_FMASK = 0x10f808 + RTF_FMASK = 0x110fc08 RTF_GATEWAY = 0x2 RTF_HOST = 0x4 RTF_LLINFO = 0x400 - RTF_MASK = 0x80 + RTF_LOCAL = 0x200000 RTF_MODIFIED = 0x20 RTF_MPATH = 0x40000 RTF_MPLS = 0x100000 + RTF_MULTICAST = 0x200 RTF_PERMANENT_ARP = 0x2000 RTF_PROTO1 = 0x8000 RTF_PROTO2 = 0x4000 RTF_PROTO3 = 0x2000 RTF_REJECT = 0x8 - RTF_SOURCE = 0x20000 RTF_STATIC = 0x800 - RTF_TUNNEL = 0x100000 RTF_UP = 0x1 RTF_USETRAILERS = 0x8000 - RTF_XRESOLVE = 0x200 RTM_ADD = 0x1 + RTM_BFD = 0x12 RTM_CHANGE = 0x3 RTM_DELADDR = 0xd RTM_DELETE = 0x2 @@ -1040,11 +1100,13 @@ const ( RTM_GET = 0x4 RTM_IFANNOUNCE = 0xf RTM_IFINFO = 0xe + RTM_INVALIDATE = 0x11 RTM_LOCK = 0x8 RTM_LOSING = 0x5 RTM_MAXSIZE = 0x800 RTM_MISS = 0x7 RTM_NEWADDR = 0xc + RTM_PROPOSAL = 0x13 RTM_REDIRECT = 0x6 RTM_RESOLVE = 0xb RTM_RTTUNIT = 0xf4240 @@ -1057,6 +1119,8 @@ const ( RTV_RTTVAR = 0x80 RTV_SPIPE = 0x10 RTV_SSTHRESH = 0x20 + RT_TABLEID_BITS = 0x8 + RT_TABLEID_MASK = 0xff RT_TABLEID_MAX = 0xff RUSAGE_CHILDREN = -0x1 RUSAGE_SELF = 0x0 @@ 
-1069,55 +1133,55 @@ const ( SIOCADDMULTI = 0x80206931 SIOCAIFADDR = 0x8040691a SIOCAIFGROUP = 0x80286987 - SIOCALIFADDR = 0x8218691c SIOCATMARK = 0x40047307 - SIOCBRDGADD = 0x8058693c - SIOCBRDGADDS = 0x80586941 - SIOCBRDGARL = 0x806e694d + SIOCBRDGADD = 0x8060693c + SIOCBRDGADDL = 0x80606949 + SIOCBRDGADDS = 0x80606941 + SIOCBRDGARL = 0x808c694d SIOCBRDGDADDR = 0x81286947 - SIOCBRDGDEL = 0x8058693d - SIOCBRDGDELS = 0x80586942 - SIOCBRDGFLUSH = 0x80586948 - SIOCBRDGFRL = 0x806e694e - SIOCBRDGGCACHE = 0xc0146941 - SIOCBRDGGFD = 0xc0146952 - SIOCBRDGGHT = 0xc0146951 - SIOCBRDGGIFFLGS = 0xc058693e - SIOCBRDGGMA = 0xc0146953 + SIOCBRDGDEL = 0x8060693d + SIOCBRDGDELS = 0x80606942 + SIOCBRDGFLUSH = 0x80606948 + SIOCBRDGFRL = 0x808c694e + SIOCBRDGGCACHE = 0xc0186941 + SIOCBRDGGFD = 0xc0186952 + SIOCBRDGGHT = 0xc0186951 + SIOCBRDGGIFFLGS = 0xc060693e + SIOCBRDGGMA = 0xc0186953 SIOCBRDGGPARAM = 0xc0406958 - SIOCBRDGGPRI = 0xc0146950 + SIOCBRDGGPRI = 0xc0186950 SIOCBRDGGRL = 0xc030694f - SIOCBRDGGSIFS = 0xc058693c - SIOCBRDGGTO = 0xc0146946 - SIOCBRDGIFS = 0xc0586942 + SIOCBRDGGTO = 0xc0186946 + SIOCBRDGIFS = 0xc0606942 SIOCBRDGRTS = 0xc0206943 SIOCBRDGSADDR = 0xc1286944 - SIOCBRDGSCACHE = 0x80146940 - SIOCBRDGSFD = 0x80146952 - SIOCBRDGSHT = 0x80146951 - SIOCBRDGSIFCOST = 0x80586955 - SIOCBRDGSIFFLGS = 0x8058693f - SIOCBRDGSIFPRIO = 0x80586954 - SIOCBRDGSMA = 0x80146953 - SIOCBRDGSPRI = 0x80146950 - SIOCBRDGSPROTO = 0x8014695a - SIOCBRDGSTO = 0x80146945 - SIOCBRDGSTXHC = 0x80146959 + SIOCBRDGSCACHE = 0x80186940 + SIOCBRDGSFD = 0x80186952 + SIOCBRDGSHT = 0x80186951 + SIOCBRDGSIFCOST = 0x80606955 + SIOCBRDGSIFFLGS = 0x8060693f + SIOCBRDGSIFPRIO = 0x80606954 + SIOCBRDGSIFPROT = 0x8060694a + SIOCBRDGSMA = 0x80186953 + SIOCBRDGSPRI = 0x80186950 + SIOCBRDGSPROTO = 0x8018695a + SIOCBRDGSTO = 0x80186945 + SIOCBRDGSTXHC = 0x80186959 SIOCDELMULTI = 0x80206932 SIOCDIFADDR = 0x80206919 SIOCDIFGROUP = 0x80286989 + SIOCDIFPARENT = 0x802069b4 SIOCDIFPHYADDR = 0x80206949 - SIOCDLIFADDR = 0x8218691e + SIOCDVNETID = 0x802069af SIOCGETKALIVE = 0xc01869a4 SIOCGETLABEL = 0x8020699a + SIOCGETMPWCFG = 0xc02069ae SIOCGETPFLOW = 0xc02069fe SIOCGETPFSYNC = 0xc02069f8 SIOCGETSGCNT = 0xc0207534 SIOCGETVIFCNT = 0xc0287533 SIOCGETVLAN = 0xc0206990 - SIOCGHIWAT = 0x40047301 SIOCGIFADDR = 0xc0206921 - SIOCGIFASYNCMAP = 0xc020697c SIOCGIFBRDADDR = 0xc0206923 SIOCGIFCONF = 0xc0106924 SIOCGIFDATA = 0xc020691b @@ -1129,37 +1193,41 @@ const ( SIOCGIFGMEMB = 0xc028698a SIOCGIFGROUP = 0xc0286988 SIOCGIFHARDMTU = 0xc02069a5 - SIOCGIFMEDIA = 0xc0306936 + SIOCGIFLLPRIO = 0xc02069b6 + SIOCGIFMEDIA = 0xc0406938 SIOCGIFMETRIC = 0xc0206917 SIOCGIFMTU = 0xc020697e SIOCGIFNETMASK = 0xc0206925 - SIOCGIFPDSTADDR = 0xc0206948 + SIOCGIFPAIR = 0xc02069b1 + SIOCGIFPARENT = 0xc02069b3 SIOCGIFPRIORITY = 0xc020699c - SIOCGIFPSRCADDR = 0xc0206947 SIOCGIFRDOMAIN = 0xc02069a0 SIOCGIFRTLABEL = 0xc0206983 - SIOCGIFTIMESLOT = 0xc0206986 + SIOCGIFRXR = 0x802069aa SIOCGIFXFLAGS = 0xc020699e - SIOCGLIFADDR = 0xc218691d SIOCGLIFPHYADDR = 0xc218694b + SIOCGLIFPHYDF = 0xc02069c2 SIOCGLIFPHYRTABLE = 0xc02069a2 SIOCGLIFPHYTTL = 0xc02069a9 - SIOCGLOWAT = 0x40047303 SIOCGPGRP = 0x40047309 SIOCGSPPPPARAMS = 0xc0206994 + SIOCGUMBINFO = 0xc02069be + SIOCGUMBPARAM = 0xc02069c0 SIOCGVH = 0xc02069f6 + SIOCGVNETFLOWID = 0xc02069c4 SIOCGVNETID = 0xc02069a7 + SIOCIFAFATTACH = 0x801169ab + SIOCIFAFDETACH = 0x801169ac SIOCIFCREATE = 0x8020697a SIOCIFDESTROY = 0x80206979 SIOCIFGCLONERS = 0xc0106978 SIOCSETKALIVE = 0x801869a3 SIOCSETLABEL = 0x80206999 + SIOCSETMPWCFG = 0x802069ad 
SIOCSETPFLOW = 0x802069fd SIOCSETPFSYNC = 0x802069f7 SIOCSETVLAN = 0x8020698f - SIOCSHIWAT = 0x80047300 SIOCSIFADDR = 0x8020690c - SIOCSIFASYNCMAP = 0x8020697d SIOCSIFBRDADDR = 0x80206913 SIOCSIFDESCR = 0x80206980 SIOCSIFDSTADDR = 0x8020690e @@ -1167,25 +1235,36 @@ const ( SIOCSIFGATTR = 0x8028698c SIOCSIFGENERIC = 0x80206939 SIOCSIFLLADDR = 0x8020691f - SIOCSIFMEDIA = 0xc0206935 + SIOCSIFLLPRIO = 0x802069b5 + SIOCSIFMEDIA = 0xc0206937 SIOCSIFMETRIC = 0x80206918 SIOCSIFMTU = 0x8020697f SIOCSIFNETMASK = 0x80206916 - SIOCSIFPHYADDR = 0x80406946 + SIOCSIFPAIR = 0x802069b0 + SIOCSIFPARENT = 0x802069b2 SIOCSIFPRIORITY = 0x8020699b SIOCSIFRDOMAIN = 0x8020699f SIOCSIFRTLABEL = 0x80206982 - SIOCSIFTIMESLOT = 0x80206985 SIOCSIFXFLAGS = 0x8020699d SIOCSLIFPHYADDR = 0x8218694a + SIOCSLIFPHYDF = 0x802069c1 SIOCSLIFPHYRTABLE = 0x802069a1 SIOCSLIFPHYTTL = 0x802069a8 - SIOCSLOWAT = 0x80047302 SIOCSPGRP = 0x80047308 SIOCSSPPPPARAMS = 0x80206993 + SIOCSUMBPARAM = 0x802069bf SIOCSVH = 0xc02069f5 + SIOCSVNETFLOWID = 0x802069c3 SIOCSVNETID = 0x802069a6 + SIOCSWGDPID = 0xc018695b + SIOCSWGMAXFLOW = 0xc0186960 + SIOCSWGMAXGROUP = 0xc018695d + SIOCSWSDPID = 0x8018695c + SIOCSWSPORTNO = 0xc060695f + SOCK_CLOEXEC = 0x8000 SOCK_DGRAM = 0x2 + SOCK_DNS = 0x1000 + SOCK_NONBLOCK = 0x4000 SOCK_RAW = 0x3 SOCK_RDM = 0x4 SOCK_SEQPACKET = 0x5 @@ -1216,9 +1295,14 @@ const ( SO_TIMESTAMP = 0x800 SO_TYPE = 0x1008 SO_USELOOPBACK = 0x40 + SO_ZEROIZE = 0x2000 TCIFLUSH = 0x1 + TCIOFF = 0x3 TCIOFLUSH = 0x3 + TCION = 0x4 TCOFLUSH = 0x2 + TCOOFF = 0x1 + TCOON = 0x2 TCP_MAXBURST = 0x4 TCP_MAXSEG = 0x2 TCP_MAXWIN = 0xffff @@ -1228,11 +1312,12 @@ const ( TCP_MSS = 0x200 TCP_NODELAY = 0x1 TCP_NOPUSH = 0x10 - TCP_NSTATES = 0xb TCP_SACK_ENABLE = 0x8 TCSAFLUSH = 0x2 TIOCCBRK = 0x2000747a TIOCCDTR = 0x20007478 + TIOCCHKVERAUTH = 0x2000741e + TIOCCLRVERAUTH = 0x2000741d TIOCCONS = 0x80047462 TIOCDRAIN = 0x2000745e TIOCEXCL = 0x2000740d @@ -1287,16 +1372,19 @@ const ( TIOCSETAF = 0x802c7416 TIOCSETAW = 0x802c7415 TIOCSETD = 0x8004741b + TIOCSETVERAUTH = 0x8004741c TIOCSFLAGS = 0x8004745c TIOCSIG = 0x8004745f TIOCSPGRP = 0x80047476 TIOCSTART = 0x2000746e - TIOCSTAT = 0x80047465 + TIOCSTAT = 0x20007465 TIOCSTI = 0x80017472 TIOCSTOP = 0x2000746f TIOCSTSTAMP = 0x8008745a TIOCSWINSZ = 0x80087467 TIOCUCNTL = 0x80047466 + TIOCUCNTL_CBRK = 0x7a + TIOCUCNTL_SBRK = 0x7b TOSTOP = 0x400000 VDISCARD = 0xf VDSUSP = 0xb @@ -1308,6 +1396,18 @@ const ( VKILL = 0x5 VLNEXT = 0xe VMIN = 0x10 + VM_ANONMIN = 0x7 + VM_LOADAVG = 0x2 + VM_MAXID = 0xc + VM_MAXSLP = 0xa + VM_METER = 0x1 + VM_NKMEMPAGES = 0x6 + VM_PSSTRINGS = 0x3 + VM_SWAPENCRYPT = 0x5 + VM_USPACE = 0xb + VM_UVMEXP = 0x4 + VM_VNODEMIN = 0x9 + VM_VTEXTMIN = 0x8 VQUIT = 0x9 VREPRINT = 0x6 VSTART = 0xc @@ -1320,8 +1420,8 @@ const ( WCONTINUED = 0x8 WCOREFLAG = 0x80 WNOHANG = 0x1 - WSTOPPED = 0x7f WUNTRACED = 0x2 + XCASE = 0x1000000 ) // Errors @@ -1335,6 +1435,7 @@ const ( EALREADY = syscall.Errno(0x25) EAUTH = syscall.Errno(0x50) EBADF = syscall.Errno(0x9) + EBADMSG = syscall.Errno(0x5c) EBADRPC = syscall.Errno(0x48) EBUSY = syscall.Errno(0x10) ECANCELED = syscall.Errno(0x58) @@ -1361,7 +1462,7 @@ const ( EIPSEC = syscall.Errno(0x52) EISCONN = syscall.Errno(0x38) EISDIR = syscall.Errno(0x15) - ELAST = syscall.Errno(0x5b) + ELAST = syscall.Errno(0x5f) ELOOP = syscall.Errno(0x3e) EMEDIUMTYPE = syscall.Errno(0x56) EMFILE = syscall.Errno(0x18) @@ -1389,12 +1490,14 @@ const ( ENOTCONN = syscall.Errno(0x39) ENOTDIR = syscall.Errno(0x14) ENOTEMPTY = syscall.Errno(0x42) + ENOTRECOVERABLE = syscall.Errno(0x5d) 
ENOTSOCK = syscall.Errno(0x26) ENOTSUP = syscall.Errno(0x5b) ENOTTY = syscall.Errno(0x19) ENXIO = syscall.Errno(0x6) EOPNOTSUPP = syscall.Errno(0x2d) EOVERFLOW = syscall.Errno(0x57) + EOWNERDEAD = syscall.Errno(0x5e) EPERM = syscall.Errno(0x1) EPFNOSUPPORT = syscall.Errno(0x2e) EPIPE = syscall.Errno(0x20) @@ -1402,6 +1505,7 @@ const ( EPROCUNAVAIL = syscall.Errno(0x4c) EPROGMISMATCH = syscall.Errno(0x4b) EPROGUNAVAIL = syscall.Errno(0x4a) + EPROTO = syscall.Errno(0x5f) EPROTONOSUPPORT = syscall.Errno(0x2b) EPROTOTYPE = syscall.Errno(0x29) ERANGE = syscall.Errno(0x22) @@ -1459,132 +1563,144 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "IPsec processing failure", - 83: "attribute not found", - 84: "illegal byte sequence", - 85: "no medium found", - 86: "wrong medium type", - 87: "value too large to be stored in data type", - 88: "operation canceled", - 89: "identifier removed", - 90: "no message of desired type", - 91: "not supported", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EAGAIN", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, "EHOSTDOWN", 
"host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ENOTSUP", "not supported"}, + {92, "EBADMSG", "bad message"}, + {93, "ENOTRECOVERABLE", "state not recoverable"}, + {94, "EOWNERDEAD", "previous owner died"}, + {95, "ELAST", "protocol error"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread AST", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, } diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 4c320495cc4..93e37c4b289 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -147,6 +147,7 @@ const ( CFLUSH = 0xf CLOCAL = 0x8000 CREAD = 0x800 + CRTSCTS = 0x10000 CS5 = 0x0 CS6 = 0x100 CS7 = 0x200 @@ -1462,132 +1463,140 @@ const ( ) // Error table -var errors = [...]string{ - 1: "operation not permitted", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "input/output error", - 6: "device not configured", - 7: "argument list too long", - 8: "exec format error", - 9: "bad file descriptor", - 10: "no child processes", - 11: "resource deadlock avoided", - 12: "cannot allocate memory", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "operation not supported by device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "too many open files in system", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "numerical argument out of domain", - 34: "result too large", - 35: "resource temporarily unavailable", - 36: "operation now in progress", - 37: "operation already in progress", - 38: "socket operation on non-socket", - 39: "destination address required", - 40: "message too long", - 41: "protocol wrong type for socket", - 42: "protocol not available", - 43: "protocol not supported", - 44: "socket type not supported", - 45: "operation not supported", - 46: "protocol family not supported", - 47: "address family not supported by protocol family", - 48: "address already in use", - 49: "can't assign requested address", - 50: "network is down", - 51: "network is unreachable", - 52: "network dropped connection on reset", - 53: "software caused connection abort", - 54: "connection reset by peer", - 55: "no buffer space available", - 56: "socket is already connected", - 57: "socket is not connected", - 58: "can't send after socket shutdown", - 59: "too many references: can't splice", - 60: "connection timed out", - 61: "connection refused", - 62: "too many levels of symbolic links", - 63: "file name too long", - 64: "host is down", - 65: "no route to host", - 66: "directory not empty", - 67: "too many processes", - 68: "too many users", - 69: "disc quota exceeded", - 70: "stale NFS file handle", - 71: "too many levels of remote in path", - 72: "RPC struct is bad", - 73: "RPC version wrong", - 74: "RPC prog. 
not avail", - 75: "program version wrong", - 76: "bad procedure for program", - 77: "no locks available", - 78: "function not implemented", - 79: "inappropriate file type or format", - 80: "authentication error", - 81: "need authenticator", - 82: "IPsec processing failure", - 83: "attribute not found", - 84: "illegal byte sequence", - 85: "no medium found", - 86: "wrong medium type", - 87: "value too large to be stored in data type", - 88: "operation canceled", - 89: "identifier removed", - 90: "no message of desired type", - 91: "not supported", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "operation not permitted"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "input/output error"}, + {6, "ENXIO", "device not configured"}, + {7, "E2BIG", "argument list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file descriptor"}, + {10, "ECHILD", "no child processes"}, + {11, "EDEADLK", "resource deadlock avoided"}, + {12, "ENOMEM", "cannot allocate memory"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "operation not supported by device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "too many open files in system"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "numerical argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "EWOULDBLOCK", "resource temporarily unavailable"}, + {36, "EINPROGRESS", "operation now in progress"}, + {37, "EALREADY", "operation already in progress"}, + {38, "ENOTSOCK", "socket operation on non-socket"}, + {39, "EDESTADDRREQ", "destination address required"}, + {40, "EMSGSIZE", "message too long"}, + {41, "EPROTOTYPE", "protocol wrong type for socket"}, + {42, "ENOPROTOOPT", "protocol not available"}, + {43, "EPROTONOSUPPORT", "protocol not supported"}, + {44, "ESOCKTNOSUPPORT", "socket type not supported"}, + {45, "EOPNOTSUPP", "operation not supported"}, + {46, "EPFNOSUPPORT", "protocol family not supported"}, + {47, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {48, "EADDRINUSE", "address already in use"}, + {49, "EADDRNOTAVAIL", "can't assign requested address"}, + {50, "ENETDOWN", "network is down"}, + {51, "ENETUNREACH", "network is unreachable"}, + {52, "ENETRESET", "network dropped connection on reset"}, + {53, "ECONNABORTED", "software caused connection abort"}, + {54, "ECONNRESET", "connection reset by peer"}, + {55, "ENOBUFS", "no buffer space available"}, + {56, "EISCONN", "socket is already connected"}, + {57, "ENOTCONN", "socket is not connected"}, + {58, "ESHUTDOWN", "can't send after socket shutdown"}, + {59, "ETOOMANYREFS", "too many references: can't splice"}, + {60, "ETIMEDOUT", "operation timed out"}, + {61, "ECONNREFUSED", "connection refused"}, + {62, "ELOOP", "too many levels of symbolic links"}, + {63, "ENAMETOOLONG", "file name too long"}, + {64, 
"EHOSTDOWN", "host is down"}, + {65, "EHOSTUNREACH", "no route to host"}, + {66, "ENOTEMPTY", "directory not empty"}, + {67, "EPROCLIM", "too many processes"}, + {68, "EUSERS", "too many users"}, + {69, "EDQUOT", "disk quota exceeded"}, + {70, "ESTALE", "stale NFS file handle"}, + {71, "EREMOTE", "too many levels of remote in path"}, + {72, "EBADRPC", "RPC struct is bad"}, + {73, "ERPCMISMATCH", "RPC version wrong"}, + {74, "EPROGUNAVAIL", "RPC program not available"}, + {75, "EPROGMISMATCH", "program version wrong"}, + {76, "EPROCUNAVAIL", "bad procedure for program"}, + {77, "ENOLCK", "no locks available"}, + {78, "ENOSYS", "function not implemented"}, + {79, "EFTYPE", "inappropriate file type or format"}, + {80, "EAUTH", "authentication error"}, + {81, "ENEEDAUTH", "need authenticator"}, + {82, "EIPSEC", "IPsec processing failure"}, + {83, "ENOATTR", "attribute not found"}, + {84, "EILSEQ", "illegal byte sequence"}, + {85, "ENOMEDIUM", "no medium found"}, + {86, "EMEDIUMTYPE", "wrong medium type"}, + {87, "EOVERFLOW", "value too large to be stored in data type"}, + {88, "ECANCELED", "operation canceled"}, + {89, "EIDRM", "identifier removed"}, + {90, "ENOMSG", "no message of desired type"}, + {91, "ELAST", "not supported"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal instruction", - 5: "trace/BPT trap", - 6: "abort trap", - 7: "EMT trap", - 8: "floating point exception", - 9: "killed", - 10: "bus error", - 11: "segmentation fault", - 12: "bad system call", - 13: "broken pipe", - 14: "alarm clock", - 15: "terminated", - 16: "urgent I/O condition", - 17: "stopped (signal)", - 18: "stopped", - 19: "continued", - 20: "child exited", - 21: "stopped (tty input)", - 22: "stopped (tty output)", - 23: "I/O possible", - 24: "cputime limit exceeded", - 25: "filesize limit exceeded", - 26: "virtual timer expired", - 27: "profiling timer expired", - 28: "window size changes", - 29: "information request", - 30: "user defined signal 1", - 31: "user defined signal 2", - 32: "thread AST", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal instruction"}, + {5, "SIGTRAP", "trace/BPT trap"}, + {6, "SIGABRT", "abort trap"}, + {7, "SIGEMT", "EMT trap"}, + {8, "SIGFPE", "floating point exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus error"}, + {11, "SIGSEGV", "segmentation fault"}, + {12, "SIGSYS", "bad system call"}, + {13, "SIGPIPE", "broken pipe"}, + {14, "SIGALRM", "alarm clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGURG", "urgent I/O condition"}, + {17, "SIGSTOP", "suspended (signal)"}, + {18, "SIGTSTP", "suspended"}, + {19, "SIGCONT", "continued"}, + {20, "SIGCHLD", "child exited"}, + {21, "SIGTTIN", "stopped (tty input)"}, + {22, "SIGTTOU", "stopped (tty output)"}, + {23, "SIGIO", "I/O possible"}, + {24, "SIGXCPU", "cputime limit exceeded"}, + {25, "SIGXFSZ", "filesize limit exceeded"}, + {26, "SIGVTALRM", "virtual timer expired"}, + {27, "SIGPROF", "profiling timer expired"}, + {28, "SIGWINCH", "window size changes"}, + {29, "SIGINFO", "information request"}, + {30, "SIGUSR1", "user defined signal 1"}, + {31, "SIGUSR2", "user defined signal 2"}, + {32, "SIGTHR", "thread AST"}, } diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 09eedb00935..be42830cf3d 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -1319,171 +1319,179 @@ const ( ) // Error table -var errors = [...]string{ - 1: "not owner", - 2: "no such file or directory", - 3: "no such process", - 4: "interrupted system call", - 5: "I/O error", - 6: "no such device or address", - 7: "arg list too long", - 8: "exec format error", - 9: "bad file number", - 10: "no child processes", - 11: "resource temporarily unavailable", - 12: "not enough space", - 13: "permission denied", - 14: "bad address", - 15: "block device required", - 16: "device busy", - 17: "file exists", - 18: "cross-device link", - 19: "no such device", - 20: "not a directory", - 21: "is a directory", - 22: "invalid argument", - 23: "file table overflow", - 24: "too many open files", - 25: "inappropriate ioctl for device", - 26: "text file busy", - 27: "file too large", - 28: "no space left on device", - 29: "illegal seek", - 30: "read-only file system", - 31: "too many links", - 32: "broken pipe", - 33: "argument out of domain", - 34: "result too large", - 35: "no message of desired type", - 36: "identifier removed", - 37: "channel number out of range", - 38: "level 2 not synchronized", - 39: "level 3 halted", - 40: "level 3 reset", - 41: "link number out of range", - 42: "protocol driver not attached", - 43: "no CSI structure available", - 44: "level 2 halted", - 45: "deadlock situation detected/avoided", - 46: "no record locks available", - 47: "operation canceled", - 48: "operation not supported", - 49: "disc quota exceeded", - 50: "bad exchange descriptor", - 51: "bad request descriptor", - 52: "message tables full", - 53: "anode table overflow", - 54: "bad request code", - 55: "invalid slot", - 56: "file locking deadlock", - 57: "bad font file format", - 58: "owner of the lock died", - 59: "lock is not recoverable", - 60: "not a stream device", - 61: "no data available", - 62: "timer expired", - 63: "out of stream resources", - 64: "machine is not on the network", - 65: "package not installed", - 66: "object is remote", - 67: "link has been severed", - 68: "advertise error", - 69: "srmount error", - 70: "communication error on send", - 71: "protocol error", - 72: "locked lock was unmapped ", - 73: "facility is not active", - 74: "multihop attempted", - 77: "not a data message", - 78: "file name too long", - 79: "value too large for defined data type", - 80: "name not unique on network", - 81: "file descriptor in bad state", - 82: "remote address changed", - 83: "can not access a needed shared library", - 84: "accessing a corrupted shared library", - 85: ".lib section in a.out corrupted", - 86: "attempting to link in more shared libraries than system limit", - 87: "can not exec a shared library directly", - 88: "illegal byte sequence", - 89: "operation not applicable", - 90: "number of symbolic links encountered during path name traversal exceeds MAXSYMLINKS", - 91: "error 91", - 92: "error 92", - 93: "directory not empty", - 94: "too many users", - 95: "socket operation on non-socket", - 96: "destination address required", - 97: "message too long", - 98: "protocol wrong type for socket", - 99: "option not supported by protocol", - 120: "protocol not supported", - 121: "socket type not supported", - 122: "operation not supported on transport endpoint", - 123: "protocol family not supported", - 124: "address family not supported by protocol family", - 125: "address already in use", - 126: "cannot assign requested address", - 127: "network is down", - 
128: "network is unreachable", - 129: "network dropped connection because of reset", - 130: "software caused connection abort", - 131: "connection reset by peer", - 132: "no buffer space available", - 133: "transport endpoint is already connected", - 134: "transport endpoint is not connected", - 143: "cannot send after socket shutdown", - 144: "too many references: cannot splice", - 145: "connection timed out", - 146: "connection refused", - 147: "host is down", - 148: "no route to host", - 149: "operation already in progress", - 150: "operation now in progress", - 151: "stale NFS file handle", +var errorList = [...]struct { + num syscall.Errno + name string + desc string +}{ + {1, "EPERM", "not owner"}, + {2, "ENOENT", "no such file or directory"}, + {3, "ESRCH", "no such process"}, + {4, "EINTR", "interrupted system call"}, + {5, "EIO", "I/O error"}, + {6, "ENXIO", "no such device or address"}, + {7, "E2BIG", "arg list too long"}, + {8, "ENOEXEC", "exec format error"}, + {9, "EBADF", "bad file number"}, + {10, "ECHILD", "no child processes"}, + {11, "EAGAIN", "resource temporarily unavailable"}, + {12, "ENOMEM", "not enough space"}, + {13, "EACCES", "permission denied"}, + {14, "EFAULT", "bad address"}, + {15, "ENOTBLK", "block device required"}, + {16, "EBUSY", "device busy"}, + {17, "EEXIST", "file exists"}, + {18, "EXDEV", "cross-device link"}, + {19, "ENODEV", "no such device"}, + {20, "ENOTDIR", "not a directory"}, + {21, "EISDIR", "is a directory"}, + {22, "EINVAL", "invalid argument"}, + {23, "ENFILE", "file table overflow"}, + {24, "EMFILE", "too many open files"}, + {25, "ENOTTY", "inappropriate ioctl for device"}, + {26, "ETXTBSY", "text file busy"}, + {27, "EFBIG", "file too large"}, + {28, "ENOSPC", "no space left on device"}, + {29, "ESPIPE", "illegal seek"}, + {30, "EROFS", "read-only file system"}, + {31, "EMLINK", "too many links"}, + {32, "EPIPE", "broken pipe"}, + {33, "EDOM", "argument out of domain"}, + {34, "ERANGE", "result too large"}, + {35, "ENOMSG", "no message of desired type"}, + {36, "EIDRM", "identifier removed"}, + {37, "ECHRNG", "channel number out of range"}, + {38, "EL2NSYNC", "level 2 not synchronized"}, + {39, "EL3HLT", "level 3 halted"}, + {40, "EL3RST", "level 3 reset"}, + {41, "ELNRNG", "link number out of range"}, + {42, "EUNATCH", "protocol driver not attached"}, + {43, "ENOCSI", "no CSI structure available"}, + {44, "EL2HLT", "level 2 halted"}, + {45, "EDEADLK", "deadlock situation detected/avoided"}, + {46, "ENOLCK", "no record locks available"}, + {47, "ECANCELED", "operation canceled"}, + {48, "ENOTSUP", "operation not supported"}, + {49, "EDQUOT", "disc quota exceeded"}, + {50, "EBADE", "bad exchange descriptor"}, + {51, "EBADR", "bad request descriptor"}, + {52, "EXFULL", "message tables full"}, + {53, "ENOANO", "anode table overflow"}, + {54, "EBADRQC", "bad request code"}, + {55, "EBADSLT", "invalid slot"}, + {56, "EDEADLOCK", "file locking deadlock"}, + {57, "EBFONT", "bad font file format"}, + {58, "EOWNERDEAD", "owner of the lock died"}, + {59, "ENOTRECOVERABLE", "lock is not recoverable"}, + {60, "ENOSTR", "not a stream device"}, + {61, "ENODATA", "no data available"}, + {62, "ETIME", "timer expired"}, + {63, "ENOSR", "out of stream resources"}, + {64, "ENONET", "machine is not on the network"}, + {65, "ENOPKG", "package not installed"}, + {66, "EREMOTE", "object is remote"}, + {67, "ENOLINK", "link has been severed"}, + {68, "EADV", "advertise error"}, + {69, "ESRMNT", "srmount error"}, + {70, "ECOMM", "communication error on send"}, 
+ {71, "EPROTO", "protocol error"}, + {72, "ELOCKUNMAPPED", "locked lock was unmapped "}, + {73, "ENOTACTIVE", "facility is not active"}, + {74, "EMULTIHOP", "multihop attempted"}, + {77, "EBADMSG", "not a data message"}, + {78, "ENAMETOOLONG", "file name too long"}, + {79, "EOVERFLOW", "value too large for defined data type"}, + {80, "ENOTUNIQ", "name not unique on network"}, + {81, "EBADFD", "file descriptor in bad state"}, + {82, "EREMCHG", "remote address changed"}, + {83, "ELIBACC", "can not access a needed shared library"}, + {84, "ELIBBAD", "accessing a corrupted shared library"}, + {85, "ELIBSCN", ".lib section in a.out corrupted"}, + {86, "ELIBMAX", "attempting to link in more shared libraries than system limit"}, + {87, "ELIBEXEC", "can not exec a shared library directly"}, + {88, "EILSEQ", "illegal byte sequence"}, + {89, "ENOSYS", "operation not applicable"}, + {90, "ELOOP", "number of symbolic links encountered during path name traversal exceeds MAXSYMLINKS"}, + {91, "ERESTART", "error 91"}, + {92, "ESTRPIPE", "error 92"}, + {93, "ENOTEMPTY", "directory not empty"}, + {94, "EUSERS", "too many users"}, + {95, "ENOTSOCK", "socket operation on non-socket"}, + {96, "EDESTADDRREQ", "destination address required"}, + {97, "EMSGSIZE", "message too long"}, + {98, "EPROTOTYPE", "protocol wrong type for socket"}, + {99, "ENOPROTOOPT", "option not supported by protocol"}, + {120, "EPROTONOSUPPORT", "protocol not supported"}, + {121, "ESOCKTNOSUPPORT", "socket type not supported"}, + {122, "EOPNOTSUPP", "operation not supported on transport endpoint"}, + {123, "EPFNOSUPPORT", "protocol family not supported"}, + {124, "EAFNOSUPPORT", "address family not supported by protocol family"}, + {125, "EADDRINUSE", "address already in use"}, + {126, "EADDRNOTAVAIL", "cannot assign requested address"}, + {127, "ENETDOWN", "network is down"}, + {128, "ENETUNREACH", "network is unreachable"}, + {129, "ENETRESET", "network dropped connection because of reset"}, + {130, "ECONNABORTED", "software caused connection abort"}, + {131, "ECONNRESET", "connection reset by peer"}, + {132, "ENOBUFS", "no buffer space available"}, + {133, "EISCONN", "transport endpoint is already connected"}, + {134, "ENOTCONN", "transport endpoint is not connected"}, + {143, "ESHUTDOWN", "cannot send after socket shutdown"}, + {144, "ETOOMANYREFS", "too many references: cannot splice"}, + {145, "ETIMEDOUT", "connection timed out"}, + {146, "ECONNREFUSED", "connection refused"}, + {147, "EHOSTDOWN", "host is down"}, + {148, "EHOSTUNREACH", "no route to host"}, + {149, "EALREADY", "operation already in progress"}, + {150, "EINPROGRESS", "operation now in progress"}, + {151, "ESTALE", "stale NFS file handle"}, } // Signal table -var signals = [...]string{ - 1: "hangup", - 2: "interrupt", - 3: "quit", - 4: "illegal Instruction", - 5: "trace/Breakpoint Trap", - 6: "abort", - 7: "emulation Trap", - 8: "arithmetic Exception", - 9: "killed", - 10: "bus Error", - 11: "segmentation Fault", - 12: "bad System Call", - 13: "broken Pipe", - 14: "alarm Clock", - 15: "terminated", - 16: "user Signal 1", - 17: "user Signal 2", - 18: "child Status Changed", - 19: "power-Fail/Restart", - 20: "window Size Change", - 21: "urgent Socket Condition", - 22: "pollable Event", - 23: "stopped (signal)", - 24: "stopped (user)", - 25: "continued", - 26: "stopped (tty input)", - 27: "stopped (tty output)", - 28: "virtual Timer Expired", - 29: "profiling Timer Expired", - 30: "cpu Limit Exceeded", - 31: "file Size Limit Exceeded", - 32: "no runnable lwp", - 33: 
"inter-lwp signal", - 34: "checkpoint Freeze", - 35: "checkpoint Thaw", - 36: "thread Cancellation", - 37: "resource Lost", - 38: "resource Control Exceeded", - 39: "reserved for JVM 1", - 40: "reserved for JVM 2", - 41: "information Request", +var signalList = [...]struct { + num syscall.Signal + name string + desc string +}{ + {1, "SIGHUP", "hangup"}, + {2, "SIGINT", "interrupt"}, + {3, "SIGQUIT", "quit"}, + {4, "SIGILL", "illegal Instruction"}, + {5, "SIGTRAP", "trace/Breakpoint Trap"}, + {6, "SIGABRT", "abort"}, + {7, "SIGEMT", "emulation Trap"}, + {8, "SIGFPE", "arithmetic Exception"}, + {9, "SIGKILL", "killed"}, + {10, "SIGBUS", "bus Error"}, + {11, "SIGSEGV", "segmentation Fault"}, + {12, "SIGSYS", "bad System Call"}, + {13, "SIGPIPE", "broken Pipe"}, + {14, "SIGALRM", "alarm Clock"}, + {15, "SIGTERM", "terminated"}, + {16, "SIGUSR1", "user Signal 1"}, + {17, "SIGUSR2", "user Signal 2"}, + {18, "SIGCHLD", "child Status Changed"}, + {19, "SIGPWR", "power-Fail/Restart"}, + {20, "SIGWINCH", "window Size Change"}, + {21, "SIGURG", "urgent Socket Condition"}, + {22, "SIGIO", "pollable Event"}, + {23, "SIGSTOP", "stopped (signal)"}, + {24, "SIGTSTP", "stopped (user)"}, + {25, "SIGCONT", "continued"}, + {26, "SIGTTIN", "stopped (tty input)"}, + {27, "SIGTTOU", "stopped (tty output)"}, + {28, "SIGVTALRM", "virtual Timer Expired"}, + {29, "SIGPROF", "profiling Timer Expired"}, + {30, "SIGXCPU", "cpu Limit Exceeded"}, + {31, "SIGXFSZ", "file Size Limit Exceeded"}, + {32, "SIGWAITING", "no runnable lwp"}, + {33, "SIGLWP", "inter-lwp signal"}, + {34, "SIGFREEZE", "checkpoint Freeze"}, + {35, "SIGTHAW", "checkpoint Thaw"}, + {36, "SIGCANCEL", "thread Cancellation"}, + {37, "SIGLOST", "resource Lost"}, + {38, "SIGXRES", "resource Control Exceeded"}, + {39, "SIGJVM1", "reserved for JVM 1"}, + {40, "SIGJVM2", "reserved for JVM 2"}, + {41, "SIGINFO", "information Request"}, } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 763ae4fbb93..ac02d4d8419 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -399,6 +399,83 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var 
_p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -693,6 +770,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index d6808e072de..1dd3cfa0e17 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -399,6 +399,83 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -693,6 +770,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 6ae95e6b9a2..cab46e74ba4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -1,4 +1,4 @@ -// mksyscall.pl -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// mksyscall.pl -l32 -tags darwin,arm syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build darwin,arm @@ -399,6 +399,83 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getxattr(path string, attr string, dest *byte, size int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -693,6 +770,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index ca6a7ea8b76..13478dd0bcf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -399,6 +399,83 @@ func pipe() (r int, w int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func getxattr(path string, attr string, dest *byte, size 
int, position uint32, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_GETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(position), uintptr(options)) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func setxattr(path string, attr string, data *byte, size int, position uint32, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_SETXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(unsafe.Pointer(data)), uintptr(size), uintptr(position), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func removexattr(path string, attr string, options int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(attr) + if err != nil { + return + } + _, _, e1 := Syscall(SYS_REMOVEXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(options)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func listxattr(path string, dest *byte, size int, options int) (sz int, err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + r0, _, e1 := Syscall6(SYS_LISTXATTR, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(dest)), uintptr(size), uintptr(options), 0, 0) + sz = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func kill(pid int, signum int, posix int) (err error) { _, _, e1 := Syscall(SYS_KILL, uintptr(pid), uintptr(signum), uintptr(posix)) if e1 != 0 { @@ -693,6 +770,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT64, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS64, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index a0241de195a..91f36e9ec30 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -618,6 +618,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), 
uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -659,6 +674,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index fd9ca5a4a69..a86434a7ba8 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index a9f18b22d32..040e2f760cd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index 9823e18a176..cddc5e86b19 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -924,6 +924,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), 
uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index ef9602c1eb0..433becfd02a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index 63054b35859..33c02b2695d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1784,17 +1795,6 @@ func Seek(fd int, offset int64, whence int) (off int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { - r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { r0, _, e1 := Syscall6(SYS_SENDFILE, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) written = int(r0) @@ -1897,21 +1897,6 @@ func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n i // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Stat(path string, stat *Stat_t) (err error) { - var _p0 *byte - _p0, err = BytePtrFromString(path) - if err != nil { - return - } - _, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func Statfs(path string, buf *Statfs_t) (err error) { var _p0 
*byte _p0, err = BytePtrFromString(path) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 8b10ee14452..f91b56c2175 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index 8f276d65ff9..52d75952503 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1541,6 +1552,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 61169b331bb..970a5c132d5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1534,6 +1545,16 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func 
Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset>>32), uintptr(offset), uintptr(length>>32), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1737,9 +1758,9 @@ func Shutdown(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { - r0, r1, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int64(int64(r0)<<32 | int64(r1)) +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 4cb59b4a5c7..b989d0f2826 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1551,6 +1562,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 0b547ae3010..1f8d14cacc5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1551,6 +1562,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index cd94d3a832b..a9c7e520e49 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1534,6 +1545,16 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_FADVISE64, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), uintptr(length), uintptr(length>>32), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -1737,9 +1758,9 @@ func Shutdown(fd int, how int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int64, err error) { - r0, r1, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) - n = int64(int64(r1)<<32 | int64(r0)) +func Splice(rfd int, roff *int64, wfd int, woff *int64, len int, flags int) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SPLICE, uintptr(rfd), uintptr(unsafe.Pointer(roff)), uintptr(wfd), uintptr(unsafe.Pointer(woff)), uintptr(len), uintptr(flags)) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index cdad555a5d5..3bb9a20992f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + 
r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1551,6 +1562,16 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 38f4e44b620..56116623d69 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) @@ -1551,6 +1562,16 @@ func Dup2(oldfd int, newfd int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index c443baf63f0..9696a0199df 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -995,6 +995,17 @@ func Nanosleep(time *Timespec, leftover *Timespec) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func PerfEventOpen(attr *PerfEventAttr, pid int, cpu int, groupFd int, flags int) (fd int, err error) { + r0, _, e1 := Syscall6(SYS_PERF_EVENT_OPEN, uintptr(unsafe.Pointer(attr)), uintptr(pid), uintptr(cpu), uintptr(groupFd), uintptr(flags), 0) + fd = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func PivotRoot(newroot string, putold string) (err error) { var _p0 *byte _p0, err = BytePtrFromString(newroot) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index 2dd98434ead..c01b3b6bab2 
100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -1222,6 +1222,16 @@ func EpollWait(epfd int, events []EpollEvent, msec int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_FADVISE64, uintptr(fd), uintptr(offset), uintptr(length), uintptr(advice), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Dup2(oldfd int, newfd int) (err error) { _, _, e1 := Syscall(SYS_DUP2, uintptr(oldfd), uintptr(newfd), 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 62eadff1c96..fb4b9627829 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -571,6 +571,16 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -601,6 +611,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index 307f4e99e93..beac82ef86a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -571,6 +571,16 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall6(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), 0, uintptr(length), uintptr(advice)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -601,6 +611,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index 61109313c81..7bd5f60b00d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -571,6 +571,16 @@ func Exit(code int) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fadvise(fd int, offset int64, length int64, advice int) (err error) { + _, _, e1 := Syscall9(SYS_POSIX_FADVISE, uintptr(fd), 0, uintptr(offset), uintptr(offset>>32), 0, uintptr(length), uintptr(length>>32), uintptr(advice), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchdir(fd int) (err error) { _, _, e1 := Syscall(SYS_FCHDIR, uintptr(fd), 0, 0) if e1 != 0 { @@ -601,6 +611,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -642,6 +667,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fsync(fd int) (err error) { _, _, e1 := Syscall(SYS_FSYNC, 
uintptr(fd), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index 003f820e679..49b3b5e8a43 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -750,6 +780,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -1224,6 +1265,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index ba0e8f32996..c4c7d8540c0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := 
Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -750,6 +780,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getrtable() (rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -1224,6 +1265,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 2ce02c7c4e7..210285b0ba5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -599,6 +599,21 @@ func Fchmod(fd int, mode uint32) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FCHMODAT, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fchown(fd int, uid int, gid int) (err error) { _, _, e1 := Syscall(SYS_FCHOWN, uintptr(fd), uintptr(uid), uintptr(gid)) if e1 != 0 { @@ -640,6 +655,21 @@ func Fstat(fd int, stat *Stat_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_FSTATAT, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Fstatfs(fd int, stat *Statfs_t) (err error) { _, _, e1 := Syscall(SYS_FSTATFS, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0) if e1 != 0 { @@ -750,6 +780,17 @@ func Getrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getrtable() 
(rtable int, err error) { + r0, _, e1 := RawSyscall(SYS_GETRTABLE, 0, 0, 0) + rtable = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Getrusage(who int, rusage *Rusage) (err error) { _, _, e1 := RawSyscall(SYS_GETRUSAGE, uintptr(who), uintptr(unsafe.Pointer(rusage)), 0) if e1 != 0 { @@ -1224,6 +1265,16 @@ func Setrlimit(which int, lim *Rlimit) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Setrtable(rtable int) (err error) { + _, _, e1 := RawSyscall(SYS_SETRTABLE, uintptr(rtable), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Setsid() (pid int, err error) { r0, _, e1 := RawSyscall(SYS_SETSID, 0, 0, 0) pid = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index f5d01b3a883..397896300de 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -50,6 +50,7 @@ import ( //go:cgo_import_dynamic libc_flock flock "libc.so" //go:cgo_import_dynamic libc_fpathconf fpathconf "libc.so" //go:cgo_import_dynamic libc_fstat fstat "libc.so" +//go:cgo_import_dynamic libc_fstatat fstatat "libc.so" //go:cgo_import_dynamic libc_fstatvfs fstatvfs "libc.so" //go:cgo_import_dynamic libc_getdents getdents "libc.so" //go:cgo_import_dynamic libc_getgid getgid "libc.so" @@ -127,6 +128,7 @@ import ( //go:cgo_import_dynamic libc___xnet_connect __xnet_connect "libsocket.so" //go:cgo_import_dynamic libc_mmap mmap "libc.so" //go:cgo_import_dynamic libc_munmap munmap "libc.so" +//go:cgo_import_dynamic libc_sendfile sendfile "libsendfile.so" //go:cgo_import_dynamic libc___xnet_sendto __xnet_sendto "libsocket.so" //go:cgo_import_dynamic libc___xnet_socket __xnet_socket "libsocket.so" //go:cgo_import_dynamic libc___xnet_socketpair __xnet_socketpair "libsocket.so" @@ -176,6 +178,7 @@ import ( //go:linkname procFlock libc_flock //go:linkname procFpathconf libc_fpathconf //go:linkname procFstat libc_fstat +//go:linkname procFstatat libc_fstatat //go:linkname procFstatvfs libc_fstatvfs //go:linkname procGetdents libc_getdents //go:linkname procGetgid libc_getgid @@ -253,6 +256,7 @@ import ( //go:linkname proc__xnet_connect libc___xnet_connect //go:linkname procmmap libc_mmap //go:linkname procmunmap libc_munmap +//go:linkname procsendfile libc_sendfile //go:linkname proc__xnet_sendto libc___xnet_sendto //go:linkname proc__xnet_socket libc___xnet_socket //go:linkname proc__xnet_socketpair libc___xnet_socketpair @@ -303,6 +307,7 @@ var ( procFlock, procFpathconf, procFstat, + procFstatat, procFstatvfs, procGetdents, procGetgid, @@ -380,6 +385,7 @@ var ( proc__xnet_connect, procmmap, procmunmap, + procsendfile, proc__xnet_sendto, proc__xnet_socket, proc__xnet_socketpair, @@ -772,6 +778,19 @@ func Fstat(fd int, stat *Stat_t) (err error) { return } +func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatat)), 4, uintptr(fd), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(stat)), uintptr(flags), 0, 0) + if e1 != 0 { + err = e1 + } + return +} + func Fstatvfs(fd int, vfsstat *Statvfs_t) (err error) { _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procFstatvfs)), 2, uintptr(fd), 
uintptr(unsafe.Pointer(vfsstat)), 0, 0, 0, 0) if e1 != 0 { @@ -1573,6 +1592,15 @@ func munmap(addr uintptr, length uintptr) (err error) { return } +func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procsendfile)), 4, uintptr(outfd), uintptr(infd), uintptr(unsafe.Pointer(offset)), uintptr(count), 0, 0) + written = int(r0) + if e1 != 0 { + err = e1 + } + return +} + func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) (err error) { var _p0 *byte if len(buf) > 0 { diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go index 83bb935b91c..207b27938b2 100644 --- a/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysctl_openbsd_amd64.go @@ -1,6 +1,8 @@ // mksysctl_openbsd.pl // MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// +build amd64,openbsd + package unix type mibentry struct { @@ -14,6 +16,7 @@ var sysctlMib = []mibentry{ {"ddb.max_line", []_C_int{9, 3}}, {"ddb.max_width", []_C_int{9, 2}}, {"ddb.panic", []_C_int{9, 5}}, + {"ddb.profile", []_C_int{9, 9}}, {"ddb.radix", []_C_int{9, 1}}, {"ddb.tab_stop_width", []_C_int{9, 4}}, {"ddb.trigger", []_C_int{9, 8}}, @@ -29,6 +32,7 @@ var sysctlMib = []mibentry{ {"hw.ncpu", []_C_int{6, 3}}, {"hw.ncpufound", []_C_int{6, 21}}, {"hw.pagesize", []_C_int{6, 7}}, + {"hw.perfpolicy", []_C_int{6, 23}}, {"hw.physmem", []_C_int{6, 19}}, {"hw.product", []_C_int{6, 15}}, {"hw.serialno", []_C_int{6, 17}}, @@ -37,7 +41,7 @@ var sysctlMib = []mibentry{ {"hw.uuid", []_C_int{6, 18}}, {"hw.vendor", []_C_int{6, 14}}, {"hw.version", []_C_int{6, 16}}, - {"kern.arandom", []_C_int{1, 37}}, + {"kern.allowkmem", []_C_int{1, 52}}, {"kern.argmax", []_C_int{1, 8}}, {"kern.boottime", []_C_int{1, 21}}, {"kern.bufcachepercent", []_C_int{1, 72}}, @@ -46,12 +50,13 @@ var sysctlMib = []mibentry{ {"kern.consdev", []_C_int{1, 75}}, {"kern.cp_time", []_C_int{1, 40}}, {"kern.cp_time2", []_C_int{1, 71}}, - {"kern.cryptodevallowsoft", []_C_int{1, 53}}, + {"kern.dnsjackport", []_C_int{1, 13}}, {"kern.domainname", []_C_int{1, 22}}, {"kern.file", []_C_int{1, 73}}, {"kern.forkstat", []_C_int{1, 42}}, {"kern.fscale", []_C_int{1, 46}}, {"kern.fsync", []_C_int{1, 33}}, + {"kern.global_ptrace", []_C_int{1, 81}}, {"kern.hostid", []_C_int{1, 11}}, {"kern.hostname", []_C_int{1, 10}}, {"kern.intrcnt.nintrcnt", []_C_int{1, 63, 1}}, @@ -84,7 +89,6 @@ var sysctlMib = []mibentry{ {"kern.pool_debug", []_C_int{1, 77}}, {"kern.posix1version", []_C_int{1, 17}}, {"kern.proc", []_C_int{1, 66}}, - {"kern.random", []_C_int{1, 31}}, {"kern.rawpartition", []_C_int{1, 24}}, {"kern.saved_ids", []_C_int{1, 20}}, {"kern.securelevel", []_C_int{1, 9}}, @@ -102,21 +106,16 @@ var sysctlMib = []mibentry{ {"kern.timecounter.hardware", []_C_int{1, 69, 3}}, {"kern.timecounter.tick", []_C_int{1, 69, 1}}, {"kern.timecounter.timestepwarnings", []_C_int{1, 69, 2}}, - {"kern.tty.maxptys", []_C_int{1, 44, 6}}, - {"kern.tty.nptys", []_C_int{1, 44, 7}}, {"kern.tty.tk_cancc", []_C_int{1, 44, 4}}, {"kern.tty.tk_nin", []_C_int{1, 44, 1}}, {"kern.tty.tk_nout", []_C_int{1, 44, 2}}, {"kern.tty.tk_rawcc", []_C_int{1, 44, 3}}, {"kern.tty.ttyinfo", []_C_int{1, 44, 5}}, {"kern.ttycount", []_C_int{1, 57}}, - {"kern.userasymcrypto", []_C_int{1, 60}}, - {"kern.usercrypto", []_C_int{1, 52}}, - {"kern.usermount", []_C_int{1, 30}}, {"kern.version", []_C_int{1, 4}}, - {"kern.vnode", []_C_int{1, 13}}, {"kern.watchdog.auto", 
[]_C_int{1, 64, 2}}, {"kern.watchdog.period", []_C_int{1, 64, 1}}, + {"kern.wxabort", []_C_int{1, 74}}, {"net.bpf.bufsize", []_C_int{4, 31, 1}}, {"net.bpf.maxbufsize", []_C_int{4, 31, 2}}, {"net.inet.ah.enable", []_C_int{4, 2, 51, 1}}, @@ -144,7 +143,9 @@ var sysctlMib = []mibentry{ {"net.inet.icmp.stats", []_C_int{4, 2, 1, 7}}, {"net.inet.icmp.tstamprepl", []_C_int{4, 2, 1, 6}}, {"net.inet.igmp.stats", []_C_int{4, 2, 2, 1}}, + {"net.inet.ip.arpdown", []_C_int{4, 2, 0, 40}}, {"net.inet.ip.arpqueued", []_C_int{4, 2, 0, 36}}, + {"net.inet.ip.arptimeout", []_C_int{4, 2, 0, 39}}, {"net.inet.ip.encdebug", []_C_int{4, 2, 0, 12}}, {"net.inet.ip.forwarding", []_C_int{4, 2, 0, 1}}, {"net.inet.ip.ifq.congestion", []_C_int{4, 2, 0, 30, 4}}, @@ -153,8 +154,10 @@ var sysctlMib = []mibentry{ {"net.inet.ip.ifq.maxlen", []_C_int{4, 2, 0, 30, 2}}, {"net.inet.ip.maxqueue", []_C_int{4, 2, 0, 11}}, {"net.inet.ip.mforwarding", []_C_int{4, 2, 0, 31}}, + {"net.inet.ip.mrtmfc", []_C_int{4, 2, 0, 37}}, {"net.inet.ip.mrtproto", []_C_int{4, 2, 0, 34}}, {"net.inet.ip.mrtstats", []_C_int{4, 2, 0, 35}}, + {"net.inet.ip.mrtvif", []_C_int{4, 2, 0, 38}}, {"net.inet.ip.mtu", []_C_int{4, 2, 0, 4}}, {"net.inet.ip.mtudisc", []_C_int{4, 2, 0, 27}}, {"net.inet.ip.mtudisctimeout", []_C_int{4, 2, 0, 28}}, @@ -173,7 +176,6 @@ var sysctlMib = []mibentry{ {"net.inet.ipip.stats", []_C_int{4, 2, 4, 2}}, {"net.inet.mobileip.allow", []_C_int{4, 2, 55, 1}}, {"net.inet.pfsync.stats", []_C_int{4, 2, 240, 1}}, - {"net.inet.pim.stats", []_C_int{4, 2, 103, 1}}, {"net.inet.tcp.ackonpush", []_C_int{4, 2, 6, 13}}, {"net.inet.tcp.always_keepalive", []_C_int{4, 2, 6, 22}}, {"net.inet.tcp.baddynamic", []_C_int{4, 2, 6, 6}}, @@ -187,6 +189,7 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.reasslimit", []_C_int{4, 2, 6, 18}}, {"net.inet.tcp.rfc1323", []_C_int{4, 2, 6, 1}}, {"net.inet.tcp.rfc3390", []_C_int{4, 2, 6, 17}}, + {"net.inet.tcp.rootonly", []_C_int{4, 2, 6, 24}}, {"net.inet.tcp.rstppslimit", []_C_int{4, 2, 6, 12}}, {"net.inet.tcp.sack", []_C_int{4, 2, 6, 10}}, {"net.inet.tcp.sackholelimit", []_C_int{4, 2, 6, 20}}, @@ -194,9 +197,12 @@ var sysctlMib = []mibentry{ {"net.inet.tcp.stats", []_C_int{4, 2, 6, 21}}, {"net.inet.tcp.synbucketlimit", []_C_int{4, 2, 6, 16}}, {"net.inet.tcp.syncachelimit", []_C_int{4, 2, 6, 15}}, + {"net.inet.tcp.synhashsize", []_C_int{4, 2, 6, 25}}, + {"net.inet.tcp.synuselimit", []_C_int{4, 2, 6, 23}}, {"net.inet.udp.baddynamic", []_C_int{4, 2, 17, 2}}, {"net.inet.udp.checksum", []_C_int{4, 2, 17, 1}}, {"net.inet.udp.recvspace", []_C_int{4, 2, 17, 3}}, + {"net.inet.udp.rootonly", []_C_int{4, 2, 17, 6}}, {"net.inet.udp.sendspace", []_C_int{4, 2, 17, 4}}, {"net.inet.udp.stats", []_C_int{4, 2, 17, 5}}, {"net.inet6.divert.recvspace", []_C_int{4, 24, 86, 1}}, @@ -209,13 +215,8 @@ var sysctlMib = []mibentry{ {"net.inet6.icmp6.nd6_delay", []_C_int{4, 24, 30, 8}}, {"net.inet6.icmp6.nd6_maxnudhint", []_C_int{4, 24, 30, 15}}, {"net.inet6.icmp6.nd6_mmaxtries", []_C_int{4, 24, 30, 10}}, - {"net.inet6.icmp6.nd6_prune", []_C_int{4, 24, 30, 6}}, {"net.inet6.icmp6.nd6_umaxtries", []_C_int{4, 24, 30, 9}}, - {"net.inet6.icmp6.nd6_useloopback", []_C_int{4, 24, 30, 11}}, - {"net.inet6.icmp6.nodeinfo", []_C_int{4, 24, 30, 13}}, - {"net.inet6.icmp6.rediraccept", []_C_int{4, 24, 30, 2}}, {"net.inet6.icmp6.redirtimeout", []_C_int{4, 24, 30, 3}}, - {"net.inet6.ip6.accept_rtadv", []_C_int{4, 24, 17, 12}}, {"net.inet6.ip6.auto_flowlabel", []_C_int{4, 24, 17, 17}}, {"net.inet6.ip6.dad_count", []_C_int{4, 24, 17, 16}}, {"net.inet6.ip6.dad_pending", 
[]_C_int{4, 24, 17, 49}}, @@ -228,20 +229,19 @@ var sysctlMib = []mibentry{ {"net.inet6.ip6.maxdynroutes", []_C_int{4, 24, 17, 48}}, {"net.inet6.ip6.maxfragpackets", []_C_int{4, 24, 17, 9}}, {"net.inet6.ip6.maxfrags", []_C_int{4, 24, 17, 41}}, - {"net.inet6.ip6.maxifdefrouters", []_C_int{4, 24, 17, 47}}, - {"net.inet6.ip6.maxifprefixes", []_C_int{4, 24, 17, 46}}, {"net.inet6.ip6.mforwarding", []_C_int{4, 24, 17, 42}}, + {"net.inet6.ip6.mrtmfc", []_C_int{4, 24, 17, 53}}, + {"net.inet6.ip6.mrtmif", []_C_int{4, 24, 17, 52}}, {"net.inet6.ip6.mrtproto", []_C_int{4, 24, 17, 8}}, {"net.inet6.ip6.mtudisctimeout", []_C_int{4, 24, 17, 50}}, {"net.inet6.ip6.multicast_mtudisc", []_C_int{4, 24, 17, 44}}, {"net.inet6.ip6.multipath", []_C_int{4, 24, 17, 43}}, {"net.inet6.ip6.neighborgcthresh", []_C_int{4, 24, 17, 45}}, {"net.inet6.ip6.redirect", []_C_int{4, 24, 17, 2}}, - {"net.inet6.ip6.rr_prune", []_C_int{4, 24, 17, 22}}, + {"net.inet6.ip6.soiikey", []_C_int{4, 24, 17, 54}}, {"net.inet6.ip6.sourcecheck", []_C_int{4, 24, 17, 10}}, {"net.inet6.ip6.sourcecheck_logint", []_C_int{4, 24, 17, 11}}, {"net.inet6.ip6.use_deprecated", []_C_int{4, 24, 17, 21}}, - {"net.inet6.ip6.v6only", []_C_int{4, 24, 17, 24}}, {"net.key.sadb_dump", []_C_int{4, 30, 1}}, {"net.key.spd_dump", []_C_int{4, 30, 2}}, {"net.mpls.ifq.congestion", []_C_int{4, 33, 3, 4}}, @@ -254,17 +254,4 @@ var sysctlMib = []mibentry{ {"net.mpls.ttl", []_C_int{4, 33, 2}}, {"net.pflow.stats", []_C_int{4, 34, 1}}, {"net.pipex.enable", []_C_int{4, 35, 1}}, - {"vm.anonmin", []_C_int{2, 7}}, - {"vm.loadavg", []_C_int{2, 2}}, - {"vm.maxslp", []_C_int{2, 10}}, - {"vm.nkmempages", []_C_int{2, 6}}, - {"vm.psstrings", []_C_int{2, 3}}, - {"vm.swapencrypt.enable", []_C_int{2, 5, 0}}, - {"vm.swapencrypt.keyscreated", []_C_int{2, 5, 1}}, - {"vm.swapencrypt.keysdeleted", []_C_int{2, 5, 2}}, - {"vm.uspace", []_C_int{2, 11}}, - {"vm.uvmexp", []_C_int{2, 4}}, - {"vm.vmmeter", []_C_int{2, 1}}, - {"vm.vnodemin", []_C_int{2, 9}}, - {"vm.vtextmin", []_C_int{2, 8}}, } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index dfe5dab67ee..384d49bfa5a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -367,4 +367,7 @@ const ( SYS_PWRITEV2 = 381 SYS_KEXEC_FILE_LOAD = 382 SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index eca97f738b7..9623248a5ef 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -367,4 +367,7 @@ const ( SYS_PWRITEV2 = 381 SYS_KEXEC_FILE_LOAD = 382 SYS_STATX = 383 + SYS_PKEY_ALLOC = 384 + SYS_PKEY_FREE = 385 + SYS_PKEY_MPROTECT = 386 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index 8bf50c8d43b..ed92409d97f 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -109,6 +109,7 @@ const ( SYS_PERSONALITY = 136 SYS_AFS_SYSCALL = 137 SYS_GETDENTS = 141 + SYS_SELECT = 142 SYS_FLOCK = 143 SYS_MSYNC = 144 SYS_READV = 145 @@ -151,6 +152,26 @@ const ( SYS_GETPMSG = 188 SYS_PUTPMSG = 189 SYS_VFORK = 190 + SYS_GETRLIMIT = 191 + SYS_LCHOWN = 198 + SYS_GETUID = 199 + SYS_GETGID = 200 + SYS_GETEUID = 201 + SYS_GETEGID = 202 + SYS_SETREUID = 203 + 
SYS_SETREGID = 204 + SYS_GETGROUPS = 205 + SYS_SETGROUPS = 206 + SYS_FCHOWN = 207 + SYS_SETRESUID = 208 + SYS_GETRESUID = 209 + SYS_SETRESGID = 210 + SYS_GETRESGID = 211 + SYS_CHOWN = 212 + SYS_SETUID = 213 + SYS_SETGID = 214 + SYS_SETFSUID = 215 + SYS_SETFSGID = 216 SYS_PIVOT_ROOT = 217 SYS_MINCORE = 218 SYS_MADVISE = 219 @@ -222,6 +243,7 @@ const ( SYS_MKNODAT = 290 SYS_FCHOWNAT = 291 SYS_FUTIMESAT = 292 + SYS_NEWFSTATAT = 293 SYS_UNLINKAT = 294 SYS_RENAMEAT = 295 SYS_LINKAT = 296 @@ -309,26 +331,4 @@ const ( SYS_S390_GUARDED_STORAGE = 378 SYS_STATX = 379 SYS_S390_STHYI = 380 - SYS_SELECT = 142 - SYS_GETRLIMIT = 191 - SYS_LCHOWN = 198 - SYS_GETUID = 199 - SYS_GETGID = 200 - SYS_GETEUID = 201 - SYS_GETEGID = 202 - SYS_SETREUID = 203 - SYS_SETREGID = 204 - SYS_GETGROUPS = 205 - SYS_SETGROUPS = 206 - SYS_FCHOWN = 207 - SYS_SETRESUID = 208 - SYS_GETRESUID = 209 - SYS_SETRESGID = 210 - SYS_GETRESGID = 211 - SYS_CHOWN = 212 - SYS_SETUID = 213 - SYS_SETGID = 214 - SYS_SETFSUID = 215 - SYS_SETFSGID = 216 - SYS_NEWFSTATAT = 293 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go index bd28146ddd5..10edff07d57 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go @@ -1,5 +1,5 @@ // mksysnum_openbsd.pl -// MACHINE GENERATED BY THE ABOVE COMMAND; DO NOT EDIT +// Code generated by the command above; see README.md. DO NOT EDIT. // +build amd64,openbsd @@ -12,6 +12,7 @@ const ( SYS_WRITE = 4 // { ssize_t sys_write(int fd, const void *buf, \ SYS_OPEN = 5 // { int sys_open(const char *path, \ SYS_CLOSE = 6 // { int sys_close(int fd); } + SYS_GETENTROPY = 7 // { int sys_getentropy(void *buf, size_t nbyte); } SYS___TFORK = 8 // { int sys___tfork(const struct __tfork *param, \ SYS_LINK = 9 // { int sys_link(const char *path, const char *link); } SYS_UNLINK = 10 // { int sys_unlink(const char *path); } @@ -37,11 +38,10 @@ const ( SYS_ACCEPT = 30 // { int sys_accept(int s, struct sockaddr *name, \ SYS_GETPEERNAME = 31 // { int sys_getpeername(int fdes, struct sockaddr *asa, \ SYS_GETSOCKNAME = 32 // { int sys_getsockname(int fdes, struct sockaddr *asa, \ - SYS_ACCESS = 33 // { int sys_access(const char *path, int flags); } + SYS_ACCESS = 33 // { int sys_access(const char *path, int amode); } SYS_CHFLAGS = 34 // { int sys_chflags(const char *path, u_int flags); } SYS_FCHFLAGS = 35 // { int sys_fchflags(int fd, u_int flags); } SYS_SYNC = 36 // { void sys_sync(void); } - SYS_KILL = 37 // { int sys_kill(int pid, int signum); } SYS_STAT = 38 // { int sys_stat(const char *path, struct stat *ub); } SYS_GETPPID = 39 // { pid_t sys_getppid(void); } SYS_LSTAT = 40 // { int sys_lstat(const char *path, struct stat *ub); } @@ -53,7 +53,6 @@ const ( SYS_SIGACTION = 46 // { int sys_sigaction(int signum, \ SYS_GETGID = 47 // { gid_t sys_getgid(void); } SYS_SIGPROCMASK = 48 // { int sys_sigprocmask(int how, sigset_t mask); } - SYS_GETLOGIN = 49 // { int sys_getlogin(char *namebuf, u_int namelen); } SYS_SETLOGIN = 50 // { int sys_setlogin(const char *namebuf); } SYS_ACCT = 51 // { int sys_acct(const char *path); } SYS_SIGPENDING = 52 // { int sys_sigpending(void); } @@ -62,7 +61,7 @@ const ( SYS_REBOOT = 55 // { int sys_reboot(int opt); } SYS_REVOKE = 56 // { int sys_revoke(const char *path); } SYS_SYMLINK = 57 // { int sys_symlink(const char *path, \ - SYS_READLINK = 58 // { int sys_readlink(const char *path, char *buf, \ + SYS_READLINK = 58 // { ssize_t sys_readlink(const char 
*path, \ SYS_EXECVE = 59 // { int sys_execve(const char *path, \ SYS_UMASK = 60 // { mode_t sys_umask(mode_t newmask); } SYS_CHROOT = 61 // { int sys_chroot(const char *path); } @@ -86,15 +85,18 @@ const ( SYS_GETGROUPS = 79 // { int sys_getgroups(int gidsetsize, \ SYS_SETGROUPS = 80 // { int sys_setgroups(int gidsetsize, \ SYS_GETPGRP = 81 // { int sys_getpgrp(void); } - SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, int pgid); } + SYS_SETPGID = 82 // { int sys_setpgid(pid_t pid, pid_t pgid); } + SYS_FUTEX = 83 // { int sys_futex(uint32_t *f, int op, int val, \ SYS_UTIMENSAT = 84 // { int sys_utimensat(int fd, const char *path, \ SYS_FUTIMENS = 85 // { int sys_futimens(int fd, \ + SYS_KBIND = 86 // { int sys_kbind(const struct __kbind *param, \ SYS_CLOCK_GETTIME = 87 // { int sys_clock_gettime(clockid_t clock_id, \ SYS_CLOCK_SETTIME = 88 // { int sys_clock_settime(clockid_t clock_id, \ SYS_CLOCK_GETRES = 89 // { int sys_clock_getres(clockid_t clock_id, \ SYS_DUP2 = 90 // { int sys_dup2(int from, int to); } SYS_NANOSLEEP = 91 // { int sys_nanosleep(const struct timespec *rqtp, \ SYS_FCNTL = 92 // { int sys_fcntl(int fd, int cmd, ... void *arg); } + SYS_ACCEPT4 = 93 // { int sys_accept4(int s, struct sockaddr *name, \ SYS___THRSLEEP = 94 // { int sys___thrsleep(const volatile void *ident, \ SYS_FSYNC = 95 // { int sys_fsync(int fd); } SYS_SETPRIORITY = 96 // { int sys_setpriority(int which, id_t who, int prio); } @@ -102,16 +104,23 @@ const ( SYS_CONNECT = 98 // { int sys_connect(int s, const struct sockaddr *name, \ SYS_GETDENTS = 99 // { int sys_getdents(int fd, void *buf, size_t buflen); } SYS_GETPRIORITY = 100 // { int sys_getpriority(int which, id_t who); } + SYS_PIPE2 = 101 // { int sys_pipe2(int *fdp, int flags); } + SYS_DUP3 = 102 // { int sys_dup3(int from, int to, int flags); } SYS_SIGRETURN = 103 // { int sys_sigreturn(struct sigcontext *sigcntxp); } SYS_BIND = 104 // { int sys_bind(int s, const struct sockaddr *name, \ SYS_SETSOCKOPT = 105 // { int sys_setsockopt(int s, int level, int name, \ SYS_LISTEN = 106 // { int sys_listen(int s, int backlog); } + SYS_CHFLAGSAT = 107 // { int sys_chflagsat(int fd, const char *path, \ + SYS_PLEDGE = 108 // { int sys_pledge(const char *promises, \ SYS_PPOLL = 109 // { int sys_ppoll(struct pollfd *fds, \ SYS_PSELECT = 110 // { int sys_pselect(int nd, fd_set *in, fd_set *ou, \ SYS_SIGSUSPEND = 111 // { int sys_sigsuspend(int mask); } + SYS_SENDSYSLOG = 112 // { int sys_sendsyslog(const char *buf, size_t nbyte, \ SYS_GETSOCKOPT = 118 // { int sys_getsockopt(int s, int level, int name, \ + SYS_THRKILL = 119 // { int sys_thrkill(pid_t tid, int signum, void *tcb); } SYS_READV = 120 // { ssize_t sys_readv(int fd, \ SYS_WRITEV = 121 // { ssize_t sys_writev(int fd, \ + SYS_KILL = 122 // { int sys_kill(int pid, int signum); } SYS_FCHOWN = 123 // { int sys_fchown(int fd, uid_t uid, gid_t gid); } SYS_FCHMOD = 124 // { int sys_fchmod(int fd, mode_t mode); } SYS_SETREUID = 126 // { int sys_setreuid(uid_t ruid, uid_t euid); } @@ -125,6 +134,7 @@ const ( SYS_MKDIR = 136 // { int sys_mkdir(const char *path, mode_t mode); } SYS_RMDIR = 137 // { int sys_rmdir(const char *path); } SYS_ADJTIME = 140 // { int sys_adjtime(const struct timeval *delta, \ + SYS_GETLOGIN_R = 141 // { int sys_getlogin_r(char *namebuf, u_int namelen); } SYS_SETSID = 147 // { int sys_setsid(void); } SYS_QUOTACTL = 148 // { int sys_quotactl(const char *path, int cmd, \ SYS_NFSSVC = 155 // { int sys_nfssvc(int flag, void *argp); } @@ -144,7 +154,7 @@ const ( SYS_LSEEK = 199 // { off_t 
sys_lseek(int fd, int pad, off_t offset, \ SYS_TRUNCATE = 200 // { int sys_truncate(const char *path, int pad, \ SYS_FTRUNCATE = 201 // { int sys_ftruncate(int fd, int pad, off_t length); } - SYS___SYSCTL = 202 // { int sys___sysctl(const int *name, u_int namelen, \ + SYS_SYSCTL = 202 // { int sys_sysctl(const int *name, u_int namelen, \ SYS_MLOCK = 203 // { int sys_mlock(const void *addr, size_t len); } SYS_MUNLOCK = 204 // { int sys_munlock(const void *addr, size_t len); } SYS_GETPGID = 207 // { pid_t sys_getpgid(pid_t pid); } diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index bc4bc89f840..327af5fba16 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -136,13 +136,13 @@ type Fsid struct { } type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - Pad_cgo_0 [3]byte + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + _ [3]byte } type RawSockaddrInet4 struct { @@ -295,14 +295,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -338,51 +338,51 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Refcount int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Refcount int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -430,11 +430,11 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index d8abcab1213..116e6e07578 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -26,9 +26,9 @@ type Timespec struct { } type Timeval struct { - Sec int64 - Usec int32 - Pad_cgo_0 [4]byte + Sec int64 + Usec int32 + _ [4]byte } type Timeval32 struct { @@ -70,7 +70,7 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev int32 - Pad_cgo_0 [4]byte + _ [4]byte Atimespec Timespec Mtimespec Timespec Ctimespec Timespec @@ -120,9 +120,9 @@ type 
Fstore_t struct { } type Radvisory_t struct { - Offset int64 - Count int32 - Pad_cgo_0 [4]byte + Offset int64 + Count int32 + _ [4]byte } type Fbootstraptransfer_t struct { @@ -132,9 +132,9 @@ type Fbootstraptransfer_t struct { } type Log2phys_t struct { - Flags uint32 - Pad_cgo_0 [8]byte - Pad_cgo_1 [8]byte + Flags uint32 + _ [8]byte + _ [8]byte } type Fsid struct { @@ -142,13 +142,13 @@ type Fsid struct { } type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - Pad_cgo_0 [3]byte + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + _ [3]byte } type RawSockaddrInet4 struct { @@ -221,10 +221,10 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -303,14 +303,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -346,51 +346,51 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Refcount int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Refcount int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -426,9 +426,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -439,22 +439,22 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval32 - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval32 + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]uint8 - Pad_cgo_0 [4]byte - Ispeed uint64 - Ospeed uint64 + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + _ [4]byte + Ispeed uint64 + Ospeed uint64 } type Winsize struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 9749c9f7d15..2750ad76070 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -137,13 +137,13 @@ type Fsid struct { } type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen 
uint16 - Type uint8 - Name [1024]int8 - Pad_cgo_0 [3]byte + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + _ [3]byte } type RawSockaddrInet4 struct { @@ -296,14 +296,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -339,51 +339,51 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Refcount int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Refcount int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -431,11 +431,11 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index 810b0bd4f6c..8cead0996c8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -26,9 +26,9 @@ type Timespec struct { } type Timeval struct { - Sec int64 - Usec int32 - Pad_cgo_0 [4]byte + Sec int64 + Usec int32 + _ [4]byte } type Timeval32 struct { @@ -70,7 +70,7 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev int32 - Pad_cgo_0 [4]byte + _ [4]byte Atimespec Timespec Mtimespec Timespec Ctimespec Timespec @@ -120,9 +120,9 @@ type Fstore_t struct { } type Radvisory_t struct { - Offset int64 - Count int32 - Pad_cgo_0 [4]byte + Offset int64 + Count int32 + _ [4]byte } type Fbootstraptransfer_t struct { @@ -132,9 +132,9 @@ type Fbootstraptransfer_t struct { } type Log2phys_t struct { - Flags uint32 - Pad_cgo_0 [8]byte - Pad_cgo_1 [8]byte + Flags uint32 + _ [8]byte + _ [8]byte } type Fsid struct { @@ -142,13 +142,13 @@ type Fsid struct { } type Dirent struct { - Ino uint64 - Seekoff uint64 - Reclen uint16 - Namlen uint16 - Type uint8 - Name [1024]int8 - Pad_cgo_0 [3]byte + Ino uint64 + Seekoff uint64 + Reclen uint16 + Namlen uint16 + Type uint8 + Name [1024]int8 + _ [3]byte } type RawSockaddrInet4 struct { @@ -221,10 +221,10 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -303,14 
+303,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -346,51 +346,51 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfmaMsghdr2 struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Refcount int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Refcount int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -426,9 +426,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -439,22 +439,22 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval32 - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp Timeval32 + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]uint8 - Pad_cgo_0 [4]byte - Ispeed uint64 - Ospeed uint64 + Iflag uint64 + Oflag uint64 + Cflag uint64 + Lflag uint64 + Cc [20]uint8 + _ [4]byte + Ispeed uint64 + Ospeed uint64 } type Winsize struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index e3b8ebb0169..315a553bd5b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -108,7 +108,7 @@ type Statfs_t struct { Owner uint32 Type int32 Flags int32 - Pad_cgo_0 [4]byte + _ [4]byte Syncwrites int64 Asyncwrites int64 Fstypename [16]int8 @@ -118,7 +118,7 @@ type Statfs_t struct { Spares1 int16 Mntfromname [80]int8 Spares2 int16 - Pad_cgo_1 [4]byte + _ [4]byte Spare [2]int64 } @@ -219,10 +219,10 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -294,14 +294,14 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { @@ -311,7 +311,7 @@ type IfData struct { Hdrlen uint8 Recvquota uint8 Xmitquota uint8 - Pad_cgo_0 [2]byte + _ [2]byte Mtu uint64 Metric uint64 Link_state uint64 @@ -333,24 
+333,24 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type IfmaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte } type IfAnnounceMsghdr struct { @@ -363,19 +363,19 @@ type IfAnnounceMsghdr struct { } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint64 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint64 + Rmx RtMetrics } type RtMetrics struct { @@ -391,7 +391,7 @@ type RtMetrics struct { Hopcount uint64 Mssopt uint16 Pad uint16 - Pad_cgo_0 [4]byte + _ [4]byte Msl uint64 Iwmaxsegs uint64 Iwcapsegs uint64 @@ -416,9 +416,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -429,11 +429,11 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp Timeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [6]byte + Tstamp Timeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [6]byte } type Termios struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index f9a99358a41..40474620810 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -96,45 +96,30 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - X__pad1 uint16 - _ [2]byte - X__st_ino uint32 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - X__pad2 uint16 - _ [2]byte - Size int64 - Blksize int32 - Blocks int64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Ino uint64 -} - -type Statfs_t struct { - Type int32 - Bsize int32 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Namelen int32 - Frsize int32 - Flags int32 - Spare [4]int32 + Dev uint64 + _ uint16 + _ [2]byte + _ uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint16 + _ [2]byte + Size int64 + Blksize int32 + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Ino uint64 } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -171,7 +156,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -491,7 +476,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -583,12 +568,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -683,7 +668,7 @@ type Sysinfo_t struct { Totalhigh uint32 Freehigh uint32 Unit uint32 - X_f [8]int8 + _ [8]int8 } type Utsname 
struct { @@ -739,7 +724,7 @@ const ( ) type Sigset_t struct { - X__val [32]uint32 + Val [32]uint32 } const RNDGETENTCNT = 0x80045200 @@ -895,3 +880,936 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + 
PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]int8 + _ uint32 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen 
uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( 
+ SizeofTpacketHdr = 0x18 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + 
NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + 
NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + 
NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 4df7088d40a..2ab0cb9e79b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -105,7 +105,7 @@ type Stat_t struct { Mode uint32 Uid uint32 Gid uint32 - X__pad0 int32 + _ int32 Rdev uint64 Size int64 Blksize int64 @@ -116,25 +116,10 @@ type Stat_t struct { _ [3]int64 } -type Statfs_t struct { - Type int64 - Bsize int64 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Namelen int64 - Frsize int64 - Flags int64 - Spare [4]int64 -} - type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -171,7 +156,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -495,7 +480,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -587,12 +572,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -698,7 +683,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]int8 + _ [0]int8 _ [4]byte } @@ -757,7 +742,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x80045200 @@ -913,3 +898,938 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + 
PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + 
CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + 
Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + 
NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + 
NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + 
NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index a181469ca1b..18d577b9125 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -96,47 +96,31 @@ type Rlimit struct { type _Gid_t uint32 type Stat_t struct { - Dev uint64 - X__pad1 uint16 - _ [2]byte - X__st_ino uint32 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - X__pad2 uint16 - _ [6]byte - Size int64 - Blksize int32 - _ [4]byte - Blocks int64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Ino uint64 -} 
- -type Statfs_t struct { - Type int32 - Bsize int32 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Namelen int32 - Frsize int32 - Flags int32 - Spare [4]int32 + Dev uint64 + _ uint16 + _ [2]byte + _ uint32 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + _ uint16 + _ [6]byte + Size int64 + Blksize int32 _ [4]byte + Blocks int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Ino uint64 } type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -173,7 +157,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -495,7 +479,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -587,12 +571,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -671,7 +655,7 @@ type Sysinfo_t struct { Totalhigh uint32 Freehigh uint32 Unit uint32 - X_f [8]uint8 + _ [8]uint8 } type Utsname struct { @@ -728,7 +712,7 @@ const ( ) type Sigset_t struct { - X__val [32]uint32 + Val [32]uint32 } const RNDGETENTCNT = 0x80045200 @@ -884,3 +868,937 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + 
PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + 
CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]uint8 + _ uint32 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int32 + Frsize int32 + Flags int32 + Spare [4]int32 + _ [4]byte +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type 
TpacketHdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x18 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + 
NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC 
= 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + 
NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index cff50c3d3e5..6ea80563f12 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -106,10 +106,10 @@ type Stat_t struct { Uid uint32 Gid uint32 Rdev uint64 - X__pad1 uint64 + _ uint64 Size int64 Blksize int32 - X__pad2 int32 + _ int32 Blocks int64 Atim Timespec Mtim Timespec @@ -117,25 +117,10 @@ type Stat_t struct { _ [2]int32 } -type Statfs_t struct { - Type int64 - Bsize int64 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Namelen int64 - Frsize int64 - Flags int64 - Spare [4]int64 -} - type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +157,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -496,7 +481,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -588,12 +573,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -676,7 +661,7 @@ type Sysinfo_t struct { Totalhigh 
uint64 Freehigh uint64 Unit uint32 - X_f [0]int8 + _ [0]int8 _ [4]byte } @@ -736,7 +721,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x80045200 @@ -892,3 +877,938 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + 
PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig 
struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + 
Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 
0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS 
= 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + 
NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 87d0a8b1b08..e0a3932232e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -115,27 +115,10 @@ type Stat_t struct { Pad5 [14]int32 } -type Statfs_t struct { - Type int32 - Bsize int32 - Frsize int32 - _ [4]byte - Blocks uint64 - Bfree uint64 - Files uint64 - Ffree uint64 - Bavail uint64 - Fsid Fsid - Namelen int32 - Flags int32 - Spare [5]int32 - _ [4]byte -} - type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +155,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -494,7 +477,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -586,12 +569,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -676,7 +659,7 @@ type Sysinfo_t struct { Totalhigh uint32 Freehigh uint32 Unit uint32 - X_f [8]int8 + _ [8]int8 } type Utsname struct { @@ -733,7 +716,7 @@ const ( ) type Sigset_t struct { - X__val [32]uint32 + Val [32]uint32 } const RNDGETENTCNT = 0x40045200 @@ -889,3 +872,938 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = 
CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + 
CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]int8 + _ uint32 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc 
uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Frsize int32 + _ [4]byte + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int32 + Flags int32 + Spare [5]int32 + _ [4]byte +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x18 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 
0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + 
NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + 
NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index cf4e2bd297f..838135e24b7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -117,25 +117,10 @@ type Stat_t struct { Blocks int64 } -type Statfs_t struct { - Type int64 - Bsize int64 - Frsize int64 - Blocks uint64 - Bfree uint64 - Files uint64 - Ffree uint64 - Bavail uint64 - Fsid Fsid - Namelen int64 - Flags int64 - Spare [5]int64 -} - type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ 
int32 } type Statx_t struct { @@ -172,7 +157,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -496,7 +481,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -588,12 +573,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -679,7 +664,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]int8 + _ [0]int8 _ [4]byte } @@ -738,7 +723,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x40045200 @@ -894,3 +879,938 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + 
PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + 
CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Frsize int64 + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int64 + Flags int64 + Spare [5]int64 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid 
uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + 
NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + 
NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + 
NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index b8da482ca73..a9d0131f2ee 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -117,25 +117,10 @@ type Stat_t struct { Blocks int64 } -type Statfs_t struct { - Type int64 - Bsize int64 - Frsize int64 - Blocks uint64 - Bfree uint64 - Files uint64 - Ffree uint64 - Bavail uint64 - Fsid Fsid - Namelen int64 - Flags int64 - Spare [5]int64 -} - type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +157,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -496,7 +481,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -588,12 +573,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -679,7 +664,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]int8 + _ [0]int8 _ [4]byte } @@ -738,7 +723,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x40045200 @@ -894,3 +879,938 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 
+ Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 
+ PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 
uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Frsize int64 + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int64 + Flags int64 + Spare [5]int64 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 
+) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + 
NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE 
= 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 
0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 7106b512d44..4f6f1455568 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -115,27 +115,10 @@ type Stat_t struct { Pad5 [14]int32 } -type Statfs_t struct { - Type int32 - Bsize int32 - Frsize int32 - _ [4]byte - Blocks uint64 - Bfree uint64 - Files uint64 - Ffree uint64 - Bavail uint64 - Fsid Fsid - Namelen int32 - Flags int32 - Spare [5]int32 - _ [4]byte -} - type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -172,7 +155,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -494,7 +477,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -586,12 +569,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -676,7 +659,7 @@ type Sysinfo_t struct { Totalhigh uint32 Freehigh uint32 Unit uint32 - X_f [8]int8 + _ [8]int8 } type Utsname struct { @@ -733,7 +716,7 @@ const ( ) type Sigset_t struct { - X__val [32]uint32 + Val [32]uint32 } const RNDGETENTCNT = 0x40045200 @@ -889,3 +872,938 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + 
PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + 
CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [122]int8 + _ uint32 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + Start uint32 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int32 + Bsize int32 + Frsize int32 + _ [4]byte + Blocks uint64 + Bfree uint64 + Files uint64 + Ffree uint64 + Bavail uint64 + Fsid Fsid + Namelen int32 + Flags int32 + Spare [5]int32 + _ [4]byte +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + 
ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x18 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + 
NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + 
NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 
0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 319071c8b66..9ddd47015e3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -105,7 +105,7 @@ type Stat_t struct { Mode uint32 Uid uint32 Gid uint32 - X__pad2 int32 + _ int32 Rdev uint64 Size int64 Blksize int64 @@ -118,25 +118,10 @@ type Stat_t struct { _ uint64 } -type Statfs_t struct { - Type int64 - Bsize int64 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Namelen int64 - Frsize int64 - Flags int64 - Spare [4]int64 -} - type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -173,7 +158,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -497,7 +482,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -589,12 +574,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -686,7 +671,7 
@@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]uint8 + _ [0]uint8 _ [4]byte } @@ -709,10 +694,10 @@ type Ustat_t struct { } type EpollEvent struct { - Events uint32 - X_padFd int32 - Fd int32 - Pad int32 + Events uint32 + _ int32 + Fd int32 + Pad int32 } const ( @@ -746,7 +731,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x40045200 @@ -902,3 +887,938 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + 
PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + 
CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]uint8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + 
Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 
+ NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + 
NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 
+ NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index ef00ed498ae..3a5cc696ec3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -105,7 +105,7 @@ type Stat_t struct { Mode uint32 Uid uint32 Gid uint32 - X__pad2 int32 + _ int32 Rdev uint64 Size int64 Blksize int64 @@ -118,25 +118,10 @@ type Stat_t struct { _ uint64 } -type Statfs_t struct { - Type int64 - Bsize int64 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Namelen int64 - Frsize int64 - Flags int64 - Spare [4]int64 -} - type StatxTimestamp struct { - Sec int64 - Nsec uint32 - X__reserved int32 + Sec int64 + Nsec uint32 + _ int32 } type Statx_t struct { @@ -173,7 +158,7 @@ type Dirent struct { } type Fsid struct { - X__val [2]int32 + Val [2]int32 } type Flock_t struct { @@ -497,7 +482,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -589,12 +574,12 @@ type RtAttr struct { } type IfInfomsg struct { - Family uint8 - X__ifi_pad uint8 - Type uint16 - Index int32 - Flags uint32 - Change uint32 + Family uint8 + _ uint8 + Type uint16 + Index int32 + Flags uint32 + Change uint32 } type IfAddrmsg struct { @@ -686,7 +671,7 @@ type Sysinfo_t struct { Totalhigh uint64 Freehigh uint64 Unit uint32 - X_f [0]uint8 + _ [0]uint8 _ [4]byte } @@ -709,10 +694,10 @@ type Ustat_t struct { } type EpollEvent struct { - Events uint32 - X_padFd int32 - Fd int32 - Pad int32 + Events uint32 + _ int32 + Fd int32 + Pad int32 } const ( @@ -746,7 +731,7 @@ const ( ) type Sigset_t struct { - X__val [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x40045200 @@ -902,3 +887,938 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset 
uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + 
PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x1 + CBitFieldMaskBit1 = 0x2 + CBitFieldMaskBit2 = 0x4 + CBitFieldMaskBit3 = 0x8 + CBitFieldMaskBit4 = 0x10 + CBitFieldMaskBit5 = 0x20 + CBitFieldMaskBit6 = 0x40 + CBitFieldMaskBit7 = 0x80 + CBitFieldMaskBit8 = 0x100 + CBitFieldMaskBit9 = 0x200 + CBitFieldMaskBit10 = 0x400 + CBitFieldMaskBit11 = 0x800 + CBitFieldMaskBit12 = 0x1000 + CBitFieldMaskBit13 = 0x2000 + CBitFieldMaskBit14 = 0x4000 + CBitFieldMaskBit15 = 0x8000 + CBitFieldMaskBit16 = 0x10000 + CBitFieldMaskBit17 = 0x20000 + CBitFieldMaskBit18 = 0x40000 + CBitFieldMaskBit19 = 0x80000 + CBitFieldMaskBit20 = 0x100000 + CBitFieldMaskBit21 = 0x200000 + CBitFieldMaskBit22 = 0x400000 + CBitFieldMaskBit23 = 0x800000 + CBitFieldMaskBit24 = 0x1000000 + CBitFieldMaskBit25 = 0x2000000 + CBitFieldMaskBit26 = 0x4000000 + CBitFieldMaskBit27 = 0x8000000 + CBitFieldMaskBit28 = 0x10000000 + CBitFieldMaskBit29 = 0x20000000 + CBitFieldMaskBit30 = 0x40000000 + CBitFieldMaskBit31 = 0x80000000 + CBitFieldMaskBit32 = 0x100000000 + CBitFieldMaskBit33 = 0x200000000 + CBitFieldMaskBit34 = 0x400000000 + CBitFieldMaskBit35 = 0x800000000 + CBitFieldMaskBit36 = 0x1000000000 + CBitFieldMaskBit37 = 0x2000000000 + CBitFieldMaskBit38 = 0x4000000000 + CBitFieldMaskBit39 = 0x8000000000 + CBitFieldMaskBit40 = 0x10000000000 + CBitFieldMaskBit41 = 0x20000000000 + CBitFieldMaskBit42 = 0x40000000000 + CBitFieldMaskBit43 = 0x80000000000 + CBitFieldMaskBit44 = 0x100000000000 + CBitFieldMaskBit45 = 0x200000000000 + CBitFieldMaskBit46 = 0x400000000000 + CBitFieldMaskBit47 = 0x800000000000 + CBitFieldMaskBit48 = 0x1000000000000 + CBitFieldMaskBit49 = 0x2000000000000 + CBitFieldMaskBit50 = 0x4000000000000 + CBitFieldMaskBit51 = 0x8000000000000 + CBitFieldMaskBit52 = 0x10000000000000 + CBitFieldMaskBit53 = 0x20000000000000 + CBitFieldMaskBit54 = 0x40000000000000 + CBitFieldMaskBit55 = 0x80000000000000 + CBitFieldMaskBit56 = 0x100000000000000 + CBitFieldMaskBit57 = 0x200000000000000 + CBitFieldMaskBit58 = 0x400000000000000 + CBitFieldMaskBit59 = 0x800000000000000 + CBitFieldMaskBit60 = 0x1000000000000000 + CBitFieldMaskBit61 = 0x2000000000000000 + CBitFieldMaskBit62 = 0x4000000000000000 + CBitFieldMaskBit63 = 0x8000000000000000 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]uint8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 
uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type int64 + Bsize int64 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen int64 + Frsize int64 + Flags int64 + Spare [4]int64 +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} + +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + 
NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + 
NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + 
NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG = 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go 
index e9ee4970604..032a71bbfe3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -116,22 +116,6 @@ type Stat_t struct { _ [3]int64 } -type Statfs_t struct { - Type uint32 - Bsize uint32 - Blocks uint64 - Bfree uint64 - Bavail uint64 - Files uint64 - Ffree uint64 - Fsid Fsid - Namelen uint32 - Frsize uint32 - Flags uint32 - Spare [4]uint32 - _ [4]byte -} - type StatxTimestamp struct { Sec int64 Nsec uint32 @@ -172,7 +156,7 @@ type Dirent struct { } type Fsid struct { - _ [2]int32 + Val [2]int32 } type Flock_t struct { @@ -496,7 +480,7 @@ const ( IFLA_EVENT = 0x2c IFLA_NEW_NETNSID = 0x2d IFLA_IF_NETNSID = 0x2e - IFLA_MAX = 0x2e + IFLA_MAX = 0x31 RT_SCOPE_UNIVERSE = 0x0 RT_SCOPE_SITE = 0xc8 RT_SCOPE_LINK = 0xfd @@ -763,7 +747,7 @@ const ( ) type Sigset_t struct { - _ [16]uint64 + Val [16]uint64 } const RNDGETENTCNT = 0x80045200 @@ -919,3 +903,939 @@ const ( BDADDR_LE_PUBLIC = 0x1 BDADDR_LE_RANDOM = 0x2 ) + +type PerfEventAttr struct { + Type uint32 + Size uint32 + Config uint64 + Sample uint64 + Sample_type uint64 + Read_format uint64 + Bits uint64 + Wakeup uint32 + Bp_type uint32 + Ext1 uint64 + Ext2 uint64 + Branch_sample_type uint64 + Sample_regs_user uint64 + Sample_stack_user uint32 + Clockid int32 + Sample_regs_intr uint64 + Aux_watermark uint32 + _ uint32 +} + +type PerfEventMmapPage struct { + Version uint32 + Compat_version uint32 + Lock uint32 + Index uint32 + Offset int64 + Time_enabled uint64 + Time_running uint64 + Capabilities uint64 + Pmc_width uint16 + Time_shift uint16 + Time_mult uint32 + Time_offset uint64 + Time_zero uint64 + Size uint32 + _ [948]uint8 + Data_head uint64 + Data_tail uint64 + Data_offset uint64 + Data_size uint64 + Aux_head uint64 + Aux_tail uint64 + Aux_offset uint64 + Aux_size uint64 +} + +const ( + PerfBitDisabled uint64 = CBitFieldMaskBit0 + PerfBitInherit = CBitFieldMaskBit1 + PerfBitPinned = CBitFieldMaskBit2 + PerfBitExclusive = CBitFieldMaskBit3 + PerfBitExcludeUser = CBitFieldMaskBit4 + PerfBitExcludeKernel = CBitFieldMaskBit5 + PerfBitExcludeHv = CBitFieldMaskBit6 + PerfBitExcludeIdle = CBitFieldMaskBit7 + PerfBitMmap = CBitFieldMaskBit8 + PerfBitComm = CBitFieldMaskBit9 + PerfBitFreq = CBitFieldMaskBit10 + PerfBitInheritStat = CBitFieldMaskBit11 + PerfBitEnableOnExec = CBitFieldMaskBit12 + PerfBitTask = CBitFieldMaskBit13 + PerfBitWatermark = CBitFieldMaskBit14 + PerfBitPreciseIPBit1 = CBitFieldMaskBit15 + PerfBitPreciseIPBit2 = CBitFieldMaskBit16 + PerfBitMmapData = CBitFieldMaskBit17 + PerfBitSampleIDAll = CBitFieldMaskBit18 + PerfBitExcludeHost = CBitFieldMaskBit19 + PerfBitExcludeGuest = CBitFieldMaskBit20 + PerfBitExcludeCallchainKernel = CBitFieldMaskBit21 + PerfBitExcludeCallchainUser = CBitFieldMaskBit22 + PerfBitMmap2 = CBitFieldMaskBit23 + PerfBitCommExec = CBitFieldMaskBit24 + PerfBitUseClockID = CBitFieldMaskBit25 + PerfBitContextSwitch = CBitFieldMaskBit26 +) + +const ( + PERF_TYPE_HARDWARE = 0x0 + PERF_TYPE_SOFTWARE = 0x1 + PERF_TYPE_TRACEPOINT = 0x2 + PERF_TYPE_HW_CACHE = 0x3 + PERF_TYPE_RAW = 0x4 + PERF_TYPE_BREAKPOINT = 0x5 + + PERF_COUNT_HW_CPU_CYCLES = 0x0 + PERF_COUNT_HW_INSTRUCTIONS = 0x1 + PERF_COUNT_HW_CACHE_REFERENCES = 0x2 + PERF_COUNT_HW_CACHE_MISSES = 0x3 + PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 0x4 + PERF_COUNT_HW_BRANCH_MISSES = 0x5 + PERF_COUNT_HW_BUS_CYCLES = 0x6 + PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 0x7 + PERF_COUNT_HW_STALLED_CYCLES_BACKEND = 0x8 + PERF_COUNT_HW_REF_CPU_CYCLES = 0x9 + + PERF_COUNT_HW_CACHE_L1D = 0x0 + 
PERF_COUNT_HW_CACHE_L1I = 0x1 + PERF_COUNT_HW_CACHE_LL = 0x2 + PERF_COUNT_HW_CACHE_DTLB = 0x3 + PERF_COUNT_HW_CACHE_ITLB = 0x4 + PERF_COUNT_HW_CACHE_BPU = 0x5 + PERF_COUNT_HW_CACHE_NODE = 0x6 + + PERF_COUNT_HW_CACHE_OP_READ = 0x0 + PERF_COUNT_HW_CACHE_OP_WRITE = 0x1 + PERF_COUNT_HW_CACHE_OP_PREFETCH = 0x2 + + PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0x0 + PERF_COUNT_HW_CACHE_RESULT_MISS = 0x1 + + PERF_COUNT_SW_CPU_CLOCK = 0x0 + PERF_COUNT_SW_TASK_CLOCK = 0x1 + PERF_COUNT_SW_PAGE_FAULTS = 0x2 + PERF_COUNT_SW_CONTEXT_SWITCHES = 0x3 + PERF_COUNT_SW_CPU_MIGRATIONS = 0x4 + PERF_COUNT_SW_PAGE_FAULTS_MIN = 0x5 + PERF_COUNT_SW_PAGE_FAULTS_MAJ = 0x6 + PERF_COUNT_SW_ALIGNMENT_FAULTS = 0x7 + PERF_COUNT_SW_EMULATION_FAULTS = 0x8 + PERF_COUNT_SW_DUMMY = 0x9 + + PERF_SAMPLE_IP = 0x1 + PERF_SAMPLE_TID = 0x2 + PERF_SAMPLE_TIME = 0x4 + PERF_SAMPLE_ADDR = 0x8 + PERF_SAMPLE_READ = 0x10 + PERF_SAMPLE_CALLCHAIN = 0x20 + PERF_SAMPLE_ID = 0x40 + PERF_SAMPLE_CPU = 0x80 + PERF_SAMPLE_PERIOD = 0x100 + PERF_SAMPLE_STREAM_ID = 0x200 + PERF_SAMPLE_RAW = 0x400 + PERF_SAMPLE_BRANCH_STACK = 0x800 + + PERF_SAMPLE_BRANCH_USER = 0x1 + PERF_SAMPLE_BRANCH_KERNEL = 0x2 + PERF_SAMPLE_BRANCH_HV = 0x4 + PERF_SAMPLE_BRANCH_ANY = 0x8 + PERF_SAMPLE_BRANCH_ANY_CALL = 0x10 + PERF_SAMPLE_BRANCH_ANY_RETURN = 0x20 + PERF_SAMPLE_BRANCH_IND_CALL = 0x40 + + PERF_FORMAT_TOTAL_TIME_ENABLED = 0x1 + PERF_FORMAT_TOTAL_TIME_RUNNING = 0x2 + PERF_FORMAT_ID = 0x4 + PERF_FORMAT_GROUP = 0x8 + + PERF_RECORD_MMAP = 0x1 + PERF_RECORD_LOST = 0x2 + PERF_RECORD_COMM = 0x3 + PERF_RECORD_EXIT = 0x4 + PERF_RECORD_THROTTLE = 0x5 + PERF_RECORD_UNTHROTTLE = 0x6 + PERF_RECORD_FORK = 0x7 + PERF_RECORD_READ = 0x8 + PERF_RECORD_SAMPLE = 0x9 + + PERF_CONTEXT_HV = -0x20 + PERF_CONTEXT_KERNEL = -0x80 + PERF_CONTEXT_USER = -0x200 + + PERF_CONTEXT_GUEST = -0x800 + PERF_CONTEXT_GUEST_KERNEL = -0x880 + PERF_CONTEXT_GUEST_USER = -0xa00 + + PERF_FLAG_FD_NO_GROUP = 0x1 + PERF_FLAG_FD_OUTPUT = 0x2 + PERF_FLAG_PID_CGROUP = 0x4 +) + +const ( + CBitFieldMaskBit0 = 0x8000000000000000 + CBitFieldMaskBit1 = 0x4000000000000000 + CBitFieldMaskBit2 = 0x2000000000000000 + CBitFieldMaskBit3 = 0x1000000000000000 + CBitFieldMaskBit4 = 0x800000000000000 + CBitFieldMaskBit5 = 0x400000000000000 + CBitFieldMaskBit6 = 0x200000000000000 + CBitFieldMaskBit7 = 0x100000000000000 + CBitFieldMaskBit8 = 0x80000000000000 + CBitFieldMaskBit9 = 0x40000000000000 + CBitFieldMaskBit10 = 0x20000000000000 + CBitFieldMaskBit11 = 0x10000000000000 + CBitFieldMaskBit12 = 0x8000000000000 + CBitFieldMaskBit13 = 0x4000000000000 + CBitFieldMaskBit14 = 0x2000000000000 + CBitFieldMaskBit15 = 0x1000000000000 + CBitFieldMaskBit16 = 0x800000000000 + CBitFieldMaskBit17 = 0x400000000000 + CBitFieldMaskBit18 = 0x200000000000 + CBitFieldMaskBit19 = 0x100000000000 + CBitFieldMaskBit20 = 0x80000000000 + CBitFieldMaskBit21 = 0x40000000000 + CBitFieldMaskBit22 = 0x20000000000 + CBitFieldMaskBit23 = 0x10000000000 + CBitFieldMaskBit24 = 0x8000000000 + CBitFieldMaskBit25 = 0x4000000000 + CBitFieldMaskBit26 = 0x2000000000 + CBitFieldMaskBit27 = 0x1000000000 + CBitFieldMaskBit28 = 0x800000000 + CBitFieldMaskBit29 = 0x400000000 + CBitFieldMaskBit30 = 0x200000000 + CBitFieldMaskBit31 = 0x100000000 + CBitFieldMaskBit32 = 0x80000000 + CBitFieldMaskBit33 = 0x40000000 + CBitFieldMaskBit34 = 0x20000000 + CBitFieldMaskBit35 = 0x10000000 + CBitFieldMaskBit36 = 0x8000000 + CBitFieldMaskBit37 = 0x4000000 + CBitFieldMaskBit38 = 0x2000000 + CBitFieldMaskBit39 = 0x1000000 + CBitFieldMaskBit40 = 0x800000 + CBitFieldMaskBit41 = 0x400000 + 
CBitFieldMaskBit42 = 0x200000 + CBitFieldMaskBit43 = 0x100000 + CBitFieldMaskBit44 = 0x80000 + CBitFieldMaskBit45 = 0x40000 + CBitFieldMaskBit46 = 0x20000 + CBitFieldMaskBit47 = 0x10000 + CBitFieldMaskBit48 = 0x8000 + CBitFieldMaskBit49 = 0x4000 + CBitFieldMaskBit50 = 0x2000 + CBitFieldMaskBit51 = 0x1000 + CBitFieldMaskBit52 = 0x800 + CBitFieldMaskBit53 = 0x400 + CBitFieldMaskBit54 = 0x200 + CBitFieldMaskBit55 = 0x100 + CBitFieldMaskBit56 = 0x80 + CBitFieldMaskBit57 = 0x40 + CBitFieldMaskBit58 = 0x20 + CBitFieldMaskBit59 = 0x10 + CBitFieldMaskBit60 = 0x8 + CBitFieldMaskBit61 = 0x4 + CBitFieldMaskBit62 = 0x2 + CBitFieldMaskBit63 = 0x1 +) + +type SockaddrStorage struct { + Family uint16 + _ [118]int8 + _ uint64 +} + +type TCPMD5Sig struct { + Addr SockaddrStorage + Flags uint8 + Prefixlen uint8 + Keylen uint16 + _ uint32 + Key [80]uint8 +} + +type HDDriveCmdHdr struct { + Command uint8 + Number uint8 + Feature uint8 + Count uint8 +} + +type HDGeometry struct { + Heads uint8 + Sectors uint8 + Cylinders uint16 + _ [4]byte + Start uint64 +} + +type HDDriveID struct { + Config uint16 + Cyls uint16 + Reserved2 uint16 + Heads uint16 + Track_bytes uint16 + Sector_bytes uint16 + Sectors uint16 + Vendor0 uint16 + Vendor1 uint16 + Vendor2 uint16 + Serial_no [20]uint8 + Buf_type uint16 + Buf_size uint16 + Ecc_bytes uint16 + Fw_rev [8]uint8 + Model [40]uint8 + Max_multsect uint8 + Vendor3 uint8 + Dword_io uint16 + Vendor4 uint8 + Capability uint8 + Reserved50 uint16 + Vendor5 uint8 + TPIO uint8 + Vendor6 uint8 + TDMA uint8 + Field_valid uint16 + Cur_cyls uint16 + Cur_heads uint16 + Cur_sectors uint16 + Cur_capacity0 uint16 + Cur_capacity1 uint16 + Multsect uint8 + Multsect_valid uint8 + Lba_capacity uint32 + Dma_1word uint16 + Dma_mword uint16 + Eide_pio_modes uint16 + Eide_dma_min uint16 + Eide_dma_time uint16 + Eide_pio uint16 + Eide_pio_iordy uint16 + Words69_70 [2]uint16 + Words71_74 [4]uint16 + Queue_depth uint16 + Words76_79 [4]uint16 + Major_rev_num uint16 + Minor_rev_num uint16 + Command_set_1 uint16 + Command_set_2 uint16 + Cfsse uint16 + Cfs_enable_1 uint16 + Cfs_enable_2 uint16 + Csf_default uint16 + Dma_ultra uint16 + Trseuc uint16 + TrsEuc uint16 + CurAPMvalues uint16 + Mprc uint16 + Hw_config uint16 + Acoustic uint16 + Msrqs uint16 + Sxfert uint16 + Sal uint16 + Spg uint32 + Lba_capacity_2 uint64 + Words104_125 [22]uint16 + Last_lun uint16 + Word127 uint16 + Dlf uint16 + Csfo uint16 + Words130_155 [26]uint16 + Word156 uint16 + Words157_159 [3]uint16 + Cfa_power uint16 + Words161_175 [15]uint16 + Words176_205 [30]uint16 + Words206_254 [49]uint16 + Integrity_word uint16 +} + +type Statfs_t struct { + Type uint32 + Bsize uint32 + Blocks uint64 + Bfree uint64 + Bavail uint64 + Files uint64 + Ffree uint64 + Fsid Fsid + Namelen uint32 + Frsize uint32 + Flags uint32 + Spare [4]uint32 + _ [4]byte +} + +const ( + ST_MANDLOCK = 0x40 + ST_NOATIME = 0x400 + ST_NODEV = 0x4 + ST_NODIRATIME = 0x800 + ST_NOEXEC = 0x8 + ST_NOSUID = 0x2 + ST_RDONLY = 0x1 + ST_RELATIME = 0x1000 + ST_SYNCHRONOUS = 0x10 +) + +type TpacketHdr struct { + Status uint64 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Usec uint32 + _ [4]byte +} + +type Tpacket2Hdr struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Sec uint32 + Nsec uint32 + Vlan_tci uint16 + Vlan_tpid uint16 + _ [4]uint8 +} + +type Tpacket3Hdr struct { + Next_offset uint32 + Sec uint32 + Nsec uint32 + Snaplen uint32 + Len uint32 + Status uint32 + Mac uint16 + Net uint16 + Hv1 TpacketHdrVariant1 + _ [8]uint8 +} 
+ +type TpacketHdrVariant1 struct { + Rxhash uint32 + Vlan_tci uint32 + Vlan_tpid uint16 + _ uint16 +} + +type TpacketBlockDesc struct { + Version uint32 + To_priv uint32 + Hdr [40]byte +} + +type TpacketReq struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 +} + +type TpacketReq3 struct { + Block_size uint32 + Block_nr uint32 + Frame_size uint32 + Frame_nr uint32 + Retire_blk_tov uint32 + Sizeof_priv uint32 + Feature_req_word uint32 +} + +type TpacketStats struct { + Packets uint32 + Drops uint32 +} + +type TpacketStatsV3 struct { + Packets uint32 + Drops uint32 + Freeze_q_cnt uint32 +} + +type TpacketAuxdata struct { + Status uint32 + Len uint32 + Snaplen uint32 + Mac uint16 + Net uint16 + Vlan_tci uint16 + Vlan_tpid uint16 +} + +const ( + TPACKET_V1 = 0x0 + TPACKET_V2 = 0x1 + TPACKET_V3 = 0x2 +) + +const ( + SizeofTpacketHdr = 0x20 + SizeofTpacket2Hdr = 0x20 + SizeofTpacket3Hdr = 0x30 +) + +const ( + NF_INET_PRE_ROUTING = 0x0 + NF_INET_LOCAL_IN = 0x1 + NF_INET_FORWARD = 0x2 + NF_INET_LOCAL_OUT = 0x3 + NF_INET_POST_ROUTING = 0x4 + NF_INET_NUMHOOKS = 0x5 +) + +const ( + NF_NETDEV_INGRESS = 0x0 + NF_NETDEV_NUMHOOKS = 0x1 +) + +const ( + NFPROTO_UNSPEC = 0x0 + NFPROTO_INET = 0x1 + NFPROTO_IPV4 = 0x2 + NFPROTO_ARP = 0x3 + NFPROTO_NETDEV = 0x5 + NFPROTO_BRIDGE = 0x7 + NFPROTO_IPV6 = 0xa + NFPROTO_DECNET = 0xc + NFPROTO_NUMPROTO = 0xd +) + +type Nfgenmsg struct { + Nfgen_family uint8 + Version uint8 + Res_id uint16 +} + +const ( + NFNL_BATCH_UNSPEC = 0x0 + NFNL_BATCH_GENID = 0x1 +) + +const ( + NFT_REG_VERDICT = 0x0 + NFT_REG_1 = 0x1 + NFT_REG_2 = 0x2 + NFT_REG_3 = 0x3 + NFT_REG_4 = 0x4 + NFT_REG32_00 = 0x8 + NFT_REG32_01 = 0x9 + NFT_REG32_02 = 0xa + NFT_REG32_03 = 0xb + NFT_REG32_04 = 0xc + NFT_REG32_05 = 0xd + NFT_REG32_06 = 0xe + NFT_REG32_07 = 0xf + NFT_REG32_08 = 0x10 + NFT_REG32_09 = 0x11 + NFT_REG32_10 = 0x12 + NFT_REG32_11 = 0x13 + NFT_REG32_12 = 0x14 + NFT_REG32_13 = 0x15 + NFT_REG32_14 = 0x16 + NFT_REG32_15 = 0x17 + NFT_CONTINUE = -0x1 + NFT_BREAK = -0x2 + NFT_JUMP = -0x3 + NFT_GOTO = -0x4 + NFT_RETURN = -0x5 + NFT_MSG_NEWTABLE = 0x0 + NFT_MSG_GETTABLE = 0x1 + NFT_MSG_DELTABLE = 0x2 + NFT_MSG_NEWCHAIN = 0x3 + NFT_MSG_GETCHAIN = 0x4 + NFT_MSG_DELCHAIN = 0x5 + NFT_MSG_NEWRULE = 0x6 + NFT_MSG_GETRULE = 0x7 + NFT_MSG_DELRULE = 0x8 + NFT_MSG_NEWSET = 0x9 + NFT_MSG_GETSET = 0xa + NFT_MSG_DELSET = 0xb + NFT_MSG_NEWSETELEM = 0xc + NFT_MSG_GETSETELEM = 0xd + NFT_MSG_DELSETELEM = 0xe + NFT_MSG_NEWGEN = 0xf + NFT_MSG_GETGEN = 0x10 + NFT_MSG_TRACE = 0x11 + NFT_MSG_NEWOBJ = 0x12 + NFT_MSG_GETOBJ = 0x13 + NFT_MSG_DELOBJ = 0x14 + NFT_MSG_GETOBJ_RESET = 0x15 + NFT_MSG_MAX = 0x19 + NFTA_LIST_UNPEC = 0x0 + NFTA_LIST_ELEM = 0x1 + NFTA_HOOK_UNSPEC = 0x0 + NFTA_HOOK_HOOKNUM = 0x1 + NFTA_HOOK_PRIORITY = 0x2 + NFTA_HOOK_DEV = 0x3 + NFT_TABLE_F_DORMANT = 0x1 + NFTA_TABLE_UNSPEC = 0x0 + NFTA_TABLE_NAME = 0x1 + NFTA_TABLE_FLAGS = 0x2 + NFTA_TABLE_USE = 0x3 + NFTA_CHAIN_UNSPEC = 0x0 + NFTA_CHAIN_TABLE = 0x1 + NFTA_CHAIN_HANDLE = 0x2 + NFTA_CHAIN_NAME = 0x3 + NFTA_CHAIN_HOOK = 0x4 + NFTA_CHAIN_POLICY = 0x5 + NFTA_CHAIN_USE = 0x6 + NFTA_CHAIN_TYPE = 0x7 + NFTA_CHAIN_COUNTERS = 0x8 + NFTA_CHAIN_PAD = 0x9 + NFTA_RULE_UNSPEC = 0x0 + NFTA_RULE_TABLE = 0x1 + NFTA_RULE_CHAIN = 0x2 + NFTA_RULE_HANDLE = 0x3 + NFTA_RULE_EXPRESSIONS = 0x4 + NFTA_RULE_COMPAT = 0x5 + NFTA_RULE_POSITION = 0x6 + NFTA_RULE_USERDATA = 0x7 + NFTA_RULE_PAD = 0x8 + NFTA_RULE_ID = 0x9 + NFT_RULE_COMPAT_F_INV = 0x2 + NFT_RULE_COMPAT_F_MASK = 0x2 + NFTA_RULE_COMPAT_UNSPEC = 0x0 + NFTA_RULE_COMPAT_PROTO = 0x1 + 
NFTA_RULE_COMPAT_FLAGS = 0x2 + NFT_SET_ANONYMOUS = 0x1 + NFT_SET_CONSTANT = 0x2 + NFT_SET_INTERVAL = 0x4 + NFT_SET_MAP = 0x8 + NFT_SET_TIMEOUT = 0x10 + NFT_SET_EVAL = 0x20 + NFT_SET_OBJECT = 0x40 + NFT_SET_POL_PERFORMANCE = 0x0 + NFT_SET_POL_MEMORY = 0x1 + NFTA_SET_DESC_UNSPEC = 0x0 + NFTA_SET_DESC_SIZE = 0x1 + NFTA_SET_UNSPEC = 0x0 + NFTA_SET_TABLE = 0x1 + NFTA_SET_NAME = 0x2 + NFTA_SET_FLAGS = 0x3 + NFTA_SET_KEY_TYPE = 0x4 + NFTA_SET_KEY_LEN = 0x5 + NFTA_SET_DATA_TYPE = 0x6 + NFTA_SET_DATA_LEN = 0x7 + NFTA_SET_POLICY = 0x8 + NFTA_SET_DESC = 0x9 + NFTA_SET_ID = 0xa + NFTA_SET_TIMEOUT = 0xb + NFTA_SET_GC_INTERVAL = 0xc + NFTA_SET_USERDATA = 0xd + NFTA_SET_PAD = 0xe + NFTA_SET_OBJ_TYPE = 0xf + NFT_SET_ELEM_INTERVAL_END = 0x1 + NFTA_SET_ELEM_UNSPEC = 0x0 + NFTA_SET_ELEM_KEY = 0x1 + NFTA_SET_ELEM_DATA = 0x2 + NFTA_SET_ELEM_FLAGS = 0x3 + NFTA_SET_ELEM_TIMEOUT = 0x4 + NFTA_SET_ELEM_EXPIRATION = 0x5 + NFTA_SET_ELEM_USERDATA = 0x6 + NFTA_SET_ELEM_EXPR = 0x7 + NFTA_SET_ELEM_PAD = 0x8 + NFTA_SET_ELEM_OBJREF = 0x9 + NFTA_SET_ELEM_LIST_UNSPEC = 0x0 + NFTA_SET_ELEM_LIST_TABLE = 0x1 + NFTA_SET_ELEM_LIST_SET = 0x2 + NFTA_SET_ELEM_LIST_ELEMENTS = 0x3 + NFTA_SET_ELEM_LIST_SET_ID = 0x4 + NFT_DATA_VALUE = 0x0 + NFT_DATA_VERDICT = 0xffffff00 + NFTA_DATA_UNSPEC = 0x0 + NFTA_DATA_VALUE = 0x1 + NFTA_DATA_VERDICT = 0x2 + NFTA_VERDICT_UNSPEC = 0x0 + NFTA_VERDICT_CODE = 0x1 + NFTA_VERDICT_CHAIN = 0x2 + NFTA_EXPR_UNSPEC = 0x0 + NFTA_EXPR_NAME = 0x1 + NFTA_EXPR_DATA = 0x2 + NFTA_IMMEDIATE_UNSPEC = 0x0 + NFTA_IMMEDIATE_DREG = 0x1 + NFTA_IMMEDIATE_DATA = 0x2 + NFTA_BITWISE_UNSPEC = 0x0 + NFTA_BITWISE_SREG = 0x1 + NFTA_BITWISE_DREG = 0x2 + NFTA_BITWISE_LEN = 0x3 + NFTA_BITWISE_MASK = 0x4 + NFTA_BITWISE_XOR = 0x5 + NFT_BYTEORDER_NTOH = 0x0 + NFT_BYTEORDER_HTON = 0x1 + NFTA_BYTEORDER_UNSPEC = 0x0 + NFTA_BYTEORDER_SREG = 0x1 + NFTA_BYTEORDER_DREG = 0x2 + NFTA_BYTEORDER_OP = 0x3 + NFTA_BYTEORDER_LEN = 0x4 + NFTA_BYTEORDER_SIZE = 0x5 + NFT_CMP_EQ = 0x0 + NFT_CMP_NEQ = 0x1 + NFT_CMP_LT = 0x2 + NFT_CMP_LTE = 0x3 + NFT_CMP_GT = 0x4 + NFT_CMP_GTE = 0x5 + NFTA_CMP_UNSPEC = 0x0 + NFTA_CMP_SREG = 0x1 + NFTA_CMP_OP = 0x2 + NFTA_CMP_DATA = 0x3 + NFT_RANGE_EQ = 0x0 + NFT_RANGE_NEQ = 0x1 + NFTA_RANGE_UNSPEC = 0x0 + NFTA_RANGE_SREG = 0x1 + NFTA_RANGE_OP = 0x2 + NFTA_RANGE_FROM_DATA = 0x3 + NFTA_RANGE_TO_DATA = 0x4 + NFT_LOOKUP_F_INV = 0x1 + NFTA_LOOKUP_UNSPEC = 0x0 + NFTA_LOOKUP_SET = 0x1 + NFTA_LOOKUP_SREG = 0x2 + NFTA_LOOKUP_DREG = 0x3 + NFTA_LOOKUP_SET_ID = 0x4 + NFTA_LOOKUP_FLAGS = 0x5 + NFT_DYNSET_OP_ADD = 0x0 + NFT_DYNSET_OP_UPDATE = 0x1 + NFT_DYNSET_F_INV = 0x1 + NFTA_DYNSET_UNSPEC = 0x0 + NFTA_DYNSET_SET_NAME = 0x1 + NFTA_DYNSET_SET_ID = 0x2 + NFTA_DYNSET_OP = 0x3 + NFTA_DYNSET_SREG_KEY = 0x4 + NFTA_DYNSET_SREG_DATA = 0x5 + NFTA_DYNSET_TIMEOUT = 0x6 + NFTA_DYNSET_EXPR = 0x7 + NFTA_DYNSET_PAD = 0x8 + NFTA_DYNSET_FLAGS = 0x9 + NFT_PAYLOAD_LL_HEADER = 0x0 + NFT_PAYLOAD_NETWORK_HEADER = 0x1 + NFT_PAYLOAD_TRANSPORT_HEADER = 0x2 + NFT_PAYLOAD_CSUM_NONE = 0x0 + NFT_PAYLOAD_CSUM_INET = 0x1 + NFT_PAYLOAD_L4CSUM_PSEUDOHDR = 0x1 + NFTA_PAYLOAD_UNSPEC = 0x0 + NFTA_PAYLOAD_DREG = 0x1 + NFTA_PAYLOAD_BASE = 0x2 + NFTA_PAYLOAD_OFFSET = 0x3 + NFTA_PAYLOAD_LEN = 0x4 + NFTA_PAYLOAD_SREG = 0x5 + NFTA_PAYLOAD_CSUM_TYPE = 0x6 + NFTA_PAYLOAD_CSUM_OFFSET = 0x7 + NFTA_PAYLOAD_CSUM_FLAGS = 0x8 + NFT_EXTHDR_F_PRESENT = 0x1 + NFT_EXTHDR_OP_IPV6 = 0x0 + NFT_EXTHDR_OP_TCPOPT = 0x1 + NFTA_EXTHDR_UNSPEC = 0x0 + NFTA_EXTHDR_DREG = 0x1 + NFTA_EXTHDR_TYPE = 0x2 + NFTA_EXTHDR_OFFSET = 0x3 + NFTA_EXTHDR_LEN = 0x4 + NFTA_EXTHDR_FLAGS = 0x5 + NFTA_EXTHDR_OP = 0x6 + 
NFTA_EXTHDR_SREG = 0x7 + NFT_META_LEN = 0x0 + NFT_META_PROTOCOL = 0x1 + NFT_META_PRIORITY = 0x2 + NFT_META_MARK = 0x3 + NFT_META_IIF = 0x4 + NFT_META_OIF = 0x5 + NFT_META_IIFNAME = 0x6 + NFT_META_OIFNAME = 0x7 + NFT_META_IIFTYPE = 0x8 + NFT_META_OIFTYPE = 0x9 + NFT_META_SKUID = 0xa + NFT_META_SKGID = 0xb + NFT_META_NFTRACE = 0xc + NFT_META_RTCLASSID = 0xd + NFT_META_SECMARK = 0xe + NFT_META_NFPROTO = 0xf + NFT_META_L4PROTO = 0x10 + NFT_META_BRI_IIFNAME = 0x11 + NFT_META_BRI_OIFNAME = 0x12 + NFT_META_PKTTYPE = 0x13 + NFT_META_CPU = 0x14 + NFT_META_IIFGROUP = 0x15 + NFT_META_OIFGROUP = 0x16 + NFT_META_CGROUP = 0x17 + NFT_META_PRANDOM = 0x18 + NFT_RT_CLASSID = 0x0 + NFT_RT_NEXTHOP4 = 0x1 + NFT_RT_NEXTHOP6 = 0x2 + NFT_RT_TCPMSS = 0x3 + NFT_HASH_JENKINS = 0x0 + NFT_HASH_SYM = 0x1 + NFTA_HASH_UNSPEC = 0x0 + NFTA_HASH_SREG = 0x1 + NFTA_HASH_DREG = 0x2 + NFTA_HASH_LEN = 0x3 + NFTA_HASH_MODULUS = 0x4 + NFTA_HASH_SEED = 0x5 + NFTA_HASH_OFFSET = 0x6 + NFTA_HASH_TYPE = 0x7 + NFTA_META_UNSPEC = 0x0 + NFTA_META_DREG = 0x1 + NFTA_META_KEY = 0x2 + NFTA_META_SREG = 0x3 + NFTA_RT_UNSPEC = 0x0 + NFTA_RT_DREG = 0x1 + NFTA_RT_KEY = 0x2 + NFT_CT_STATE = 0x0 + NFT_CT_DIRECTION = 0x1 + NFT_CT_STATUS = 0x2 + NFT_CT_MARK = 0x3 + NFT_CT_SECMARK = 0x4 + NFT_CT_EXPIRATION = 0x5 + NFT_CT_HELPER = 0x6 + NFT_CT_L3PROTOCOL = 0x7 + NFT_CT_SRC = 0x8 + NFT_CT_DST = 0x9 + NFT_CT_PROTOCOL = 0xa + NFT_CT_PROTO_SRC = 0xb + NFT_CT_PROTO_DST = 0xc + NFT_CT_LABELS = 0xd + NFT_CT_PKTS = 0xe + NFT_CT_BYTES = 0xf + NFT_CT_AVGPKT = 0x10 + NFT_CT_ZONE = 0x11 + NFT_CT_EVENTMASK = 0x12 + NFTA_CT_UNSPEC = 0x0 + NFTA_CT_DREG = 0x1 + NFTA_CT_KEY = 0x2 + NFTA_CT_DIRECTION = 0x3 + NFTA_CT_SREG = 0x4 + NFT_LIMIT_PKTS = 0x0 + NFT_LIMIT_PKT_BYTES = 0x1 + NFT_LIMIT_F_INV = 0x1 + NFTA_LIMIT_UNSPEC = 0x0 + NFTA_LIMIT_RATE = 0x1 + NFTA_LIMIT_UNIT = 0x2 + NFTA_LIMIT_BURST = 0x3 + NFTA_LIMIT_TYPE = 0x4 + NFTA_LIMIT_FLAGS = 0x5 + NFTA_LIMIT_PAD = 0x6 + NFTA_COUNTER_UNSPEC = 0x0 + NFTA_COUNTER_BYTES = 0x1 + NFTA_COUNTER_PACKETS = 0x2 + NFTA_COUNTER_PAD = 0x3 + NFTA_LOG_UNSPEC = 0x0 + NFTA_LOG_GROUP = 0x1 + NFTA_LOG_PREFIX = 0x2 + NFTA_LOG_SNAPLEN = 0x3 + NFTA_LOG_QTHRESHOLD = 0x4 + NFTA_LOG_LEVEL = 0x5 + NFTA_LOG_FLAGS = 0x6 + NFTA_QUEUE_UNSPEC = 0x0 + NFTA_QUEUE_NUM = 0x1 + NFTA_QUEUE_TOTAL = 0x2 + NFTA_QUEUE_FLAGS = 0x3 + NFTA_QUEUE_SREG_QNUM = 0x4 + NFT_QUOTA_F_INV = 0x1 + NFT_QUOTA_F_DEPLETED = 0x2 + NFTA_QUOTA_UNSPEC = 0x0 + NFTA_QUOTA_BYTES = 0x1 + NFTA_QUOTA_FLAGS = 0x2 + NFTA_QUOTA_PAD = 0x3 + NFTA_QUOTA_CONSUMED = 0x4 + NFT_REJECT_ICMP_UNREACH = 0x0 + NFT_REJECT_TCP_RST = 0x1 + NFT_REJECT_ICMPX_UNREACH = 0x2 + NFT_REJECT_ICMPX_NO_ROUTE = 0x0 + NFT_REJECT_ICMPX_PORT_UNREACH = 0x1 + NFT_REJECT_ICMPX_HOST_UNREACH = 0x2 + NFT_REJECT_ICMPX_ADMIN_PROHIBITED = 0x3 + NFTA_REJECT_UNSPEC = 0x0 + NFTA_REJECT_TYPE = 0x1 + NFTA_REJECT_ICMP_CODE = 0x2 + NFT_NAT_SNAT = 0x0 + NFT_NAT_DNAT = 0x1 + NFTA_NAT_UNSPEC = 0x0 + NFTA_NAT_TYPE = 0x1 + NFTA_NAT_FAMILY = 0x2 + NFTA_NAT_REG_ADDR_MIN = 0x3 + NFTA_NAT_REG_ADDR_MAX = 0x4 + NFTA_NAT_REG_PROTO_MIN = 0x5 + NFTA_NAT_REG_PROTO_MAX = 0x6 + NFTA_NAT_FLAGS = 0x7 + NFTA_MASQ_UNSPEC = 0x0 + NFTA_MASQ_FLAGS = 0x1 + NFTA_MASQ_REG_PROTO_MIN = 0x2 + NFTA_MASQ_REG_PROTO_MAX = 0x3 + NFTA_REDIR_UNSPEC = 0x0 + NFTA_REDIR_REG_PROTO_MIN = 0x1 + NFTA_REDIR_REG_PROTO_MAX = 0x2 + NFTA_REDIR_FLAGS = 0x3 + NFTA_DUP_UNSPEC = 0x0 + NFTA_DUP_SREG_ADDR = 0x1 + NFTA_DUP_SREG_DEV = 0x2 + NFTA_FWD_UNSPEC = 0x0 + NFTA_FWD_SREG_DEV = 0x1 + NFTA_OBJREF_UNSPEC = 0x0 + NFTA_OBJREF_IMM_TYPE = 0x1 + NFTA_OBJREF_IMM_NAME = 0x2 + NFTA_OBJREF_SET_SREG 
= 0x3 + NFTA_OBJREF_SET_NAME = 0x4 + NFTA_OBJREF_SET_ID = 0x5 + NFTA_GEN_UNSPEC = 0x0 + NFTA_GEN_ID = 0x1 + NFTA_GEN_PROC_PID = 0x2 + NFTA_GEN_PROC_NAME = 0x3 + NFTA_FIB_UNSPEC = 0x0 + NFTA_FIB_DREG = 0x1 + NFTA_FIB_RESULT = 0x2 + NFTA_FIB_FLAGS = 0x3 + NFT_FIB_RESULT_UNSPEC = 0x0 + NFT_FIB_RESULT_OIF = 0x1 + NFT_FIB_RESULT_OIFNAME = 0x2 + NFT_FIB_RESULT_ADDRTYPE = 0x3 + NFTA_FIB_F_SADDR = 0x1 + NFTA_FIB_F_DADDR = 0x2 + NFTA_FIB_F_MARK = 0x4 + NFTA_FIB_F_IIF = 0x8 + NFTA_FIB_F_OIF = 0x10 + NFTA_FIB_F_PRESENT = 0x20 + NFTA_CT_HELPER_UNSPEC = 0x0 + NFTA_CT_HELPER_NAME = 0x1 + NFTA_CT_HELPER_L3PROTO = 0x2 + NFTA_CT_HELPER_L4PROTO = 0x3 + NFTA_OBJ_UNSPEC = 0x0 + NFTA_OBJ_TABLE = 0x1 + NFTA_OBJ_NAME = 0x2 + NFTA_OBJ_TYPE = 0x3 + NFTA_OBJ_DATA = 0x4 + NFTA_OBJ_USE = 0x5 + NFTA_TRACE_UNSPEC = 0x0 + NFTA_TRACE_TABLE = 0x1 + NFTA_TRACE_CHAIN = 0x2 + NFTA_TRACE_RULE_HANDLE = 0x3 + NFTA_TRACE_TYPE = 0x4 + NFTA_TRACE_VERDICT = 0x5 + NFTA_TRACE_ID = 0x6 + NFTA_TRACE_LL_HEADER = 0x7 + NFTA_TRACE_NETWORK_HEADER = 0x8 + NFTA_TRACE_TRANSPORT_HEADER = 0x9 + NFTA_TRACE_IIF = 0xa + NFTA_TRACE_IIFTYPE = 0xb + NFTA_TRACE_OIF = 0xc + NFTA_TRACE_OIFTYPE = 0xd + NFTA_TRACE_MARK = 0xe + NFTA_TRACE_NFPROTO = 0xf + NFTA_TRACE_POLICY = 0x10 + NFTA_TRACE_PAD = 0x11 + NFT_TRACETYPE_UNSPEC = 0x0 + NFT_TRACETYPE_POLICY = 0x1 + NFT_TRACETYPE_RETURN = 0x2 + NFT_TRACETYPE_RULE = 0x3 + NFTA_NG_UNSPEC = 0x0 + NFTA_NG_DREG = 0x1 + NFTA_NG_MODULUS = 0x2 + NFTA_NG_TYPE = 0x3 + NFTA_NG_OFFSET = 0x4 + NFT_NG_INCREMENTAL = 0x0 + NFT_NG_RANDOM = 0x1 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 9dbbb1ce525..8e7384b89ca 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -376,97 +376,123 @@ const ( ) const ( - IFA_UNSPEC = 0x0 - IFA_ADDRESS = 0x1 - IFA_LOCAL = 0x2 - IFA_LABEL = 0x3 - IFA_BROADCAST = 0x4 - IFA_ANYCAST = 0x5 - IFA_CACHEINFO = 0x6 - IFA_MULTICAST = 0x7 - IFLA_UNSPEC = 0x0 - IFLA_ADDRESS = 0x1 - IFLA_BROADCAST = 0x2 - IFLA_IFNAME = 0x3 - IFLA_MTU = 0x4 - IFLA_LINK = 0x5 - IFLA_QDISC = 0x6 - IFLA_STATS = 0x7 - IFLA_COST = 0x8 - IFLA_PRIORITY = 0x9 - IFLA_MASTER = 0xa - IFLA_WIRELESS = 0xb - IFLA_PROTINFO = 0xc - IFLA_TXQLEN = 0xd - IFLA_MAP = 0xe - IFLA_WEIGHT = 0xf - IFLA_OPERSTATE = 0x10 - IFLA_LINKMODE = 0x11 - IFLA_LINKINFO = 0x12 - IFLA_NET_NS_PID = 0x13 - IFLA_IFALIAS = 0x14 - IFLA_MAX = 0x2a - RT_SCOPE_UNIVERSE = 0x0 - RT_SCOPE_SITE = 0xc8 - RT_SCOPE_LINK = 0xfd - RT_SCOPE_HOST = 0xfe - RT_SCOPE_NOWHERE = 0xff - RT_TABLE_UNSPEC = 0x0 - RT_TABLE_COMPAT = 0xfc - RT_TABLE_DEFAULT = 0xfd - RT_TABLE_MAIN = 0xfe - RT_TABLE_LOCAL = 0xff - RT_TABLE_MAX = 0xffffffff - RTA_UNSPEC = 0x0 - RTA_DST = 0x1 - RTA_SRC = 0x2 - RTA_IIF = 0x3 - RTA_OIF = 0x4 - RTA_GATEWAY = 0x5 - RTA_PRIORITY = 0x6 - RTA_PREFSRC = 0x7 - RTA_METRICS = 0x8 - RTA_MULTIPATH = 0x9 - RTA_FLOW = 0xb - RTA_CACHEINFO = 0xc - RTA_TABLE = 0xf - RTN_UNSPEC = 0x0 - RTN_UNICAST = 0x1 - RTN_LOCAL = 0x2 - RTN_BROADCAST = 0x3 - RTN_ANYCAST = 0x4 - RTN_MULTICAST = 0x5 - RTN_BLACKHOLE = 0x6 - RTN_UNREACHABLE = 0x7 - RTN_PROHIBIT = 0x8 - RTN_THROW = 0x9 - RTN_NAT = 0xa - RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - 
RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 - SizeofNlMsghdr = 0x10 - SizeofNlMsgerr = 0x14 - SizeofRtGenmsg = 0x1 - SizeofNlAttr = 0x4 - SizeofRtAttr = 0x4 - SizeofIfInfomsg = 0x10 - SizeofIfAddrmsg = 0x8 - SizeofRtMsg = 0xc - SizeofRtNexthop = 0x8 + IFA_UNSPEC = 0x0 + IFA_ADDRESS = 0x1 + IFA_LOCAL = 0x2 + IFA_LABEL = 0x3 + IFA_BROADCAST = 0x4 + IFA_ANYCAST = 0x5 + IFA_CACHEINFO = 0x6 + IFA_MULTICAST = 0x7 + IFLA_UNSPEC = 0x0 + IFLA_ADDRESS = 0x1 + IFLA_BROADCAST = 0x2 + IFLA_IFNAME = 0x3 + IFLA_MTU = 0x4 + IFLA_LINK = 0x5 + IFLA_QDISC = 0x6 + IFLA_STATS = 0x7 + IFLA_COST = 0x8 + IFLA_PRIORITY = 0x9 + IFLA_MASTER = 0xa + IFLA_WIRELESS = 0xb + IFLA_PROTINFO = 0xc + IFLA_TXQLEN = 0xd + IFLA_MAP = 0xe + IFLA_WEIGHT = 0xf + IFLA_OPERSTATE = 0x10 + IFLA_LINKMODE = 0x11 + IFLA_LINKINFO = 0x12 + IFLA_NET_NS_PID = 0x13 + IFLA_IFALIAS = 0x14 + IFLA_NUM_VF = 0x15 + IFLA_VFINFO_LIST = 0x16 + IFLA_STATS64 = 0x17 + IFLA_VF_PORTS = 0x18 + IFLA_PORT_SELF = 0x19 + IFLA_AF_SPEC = 0x1a + IFLA_GROUP = 0x1b + IFLA_NET_NS_FD = 0x1c + IFLA_EXT_MASK = 0x1d + IFLA_PROMISCUITY = 0x1e + IFLA_NUM_TX_QUEUES = 0x1f + IFLA_NUM_RX_QUEUES = 0x20 + IFLA_CARRIER = 0x21 + IFLA_PHYS_PORT_ID = 0x22 + IFLA_CARRIER_CHANGES = 0x23 + IFLA_PHYS_SWITCH_ID = 0x24 + IFLA_LINK_NETNSID = 0x25 + IFLA_PHYS_PORT_NAME = 0x26 + IFLA_PROTO_DOWN = 0x27 + IFLA_GSO_MAX_SEGS = 0x28 + IFLA_GSO_MAX_SIZE = 0x29 + IFLA_PAD = 0x2a + IFLA_XDP = 0x2b + IFLA_EVENT = 0x2c + IFLA_NEW_NETNSID = 0x2d + IFLA_IF_NETNSID = 0x2e + IFLA_MAX = 0x2e + RT_SCOPE_UNIVERSE = 0x0 + RT_SCOPE_SITE = 0xc8 + RT_SCOPE_LINK = 0xfd + RT_SCOPE_HOST = 0xfe + RT_SCOPE_NOWHERE = 0xff + RT_TABLE_UNSPEC = 0x0 + RT_TABLE_COMPAT = 0xfc + RT_TABLE_DEFAULT = 0xfd + RT_TABLE_MAIN = 0xfe + RT_TABLE_LOCAL = 0xff + RT_TABLE_MAX = 0xffffffff + RTA_UNSPEC = 0x0 + RTA_DST = 0x1 + RTA_SRC = 0x2 + RTA_IIF = 0x3 + RTA_OIF = 0x4 + RTA_GATEWAY = 0x5 + RTA_PRIORITY = 0x6 + RTA_PREFSRC = 0x7 + RTA_METRICS = 0x8 + RTA_MULTIPATH = 0x9 + RTA_FLOW = 0xb + RTA_CACHEINFO = 0xc + RTA_TABLE = 0xf + RTN_UNSPEC = 0x0 + RTN_UNICAST = 0x1 + RTN_LOCAL = 0x2 + RTN_BROADCAST = 0x3 + RTN_ANYCAST = 0x4 + RTN_MULTICAST = 0x5 + RTN_BLACKHOLE = 0x6 + RTN_UNREACHABLE = 0x7 + RTN_PROHIBIT = 0x8 + RTN_THROW = 0x9 + RTN_NAT = 0xa + RTN_XRESOLVE = 0xb + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + SizeofNlMsghdr = 0x10 + SizeofNlMsgerr = 0x14 + SizeofRtGenmsg = 0x1 + SizeofNlAttr = 0x4 + SizeofRtAttr = 0x4 + SizeofIfInfomsg = 0x10 + SizeofIfAddrmsg = 0x8 + SizeofRtMsg = 0xc + SizeofRtNexthop = 0x8 ) type NlMsghdr struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index da70faa82d3..4b86fb2b332 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -103,6 +103,15 @@ const ( PathMax = 0x400 ) +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go 
b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 0963ab8c436..9048a509d08 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -107,6 +107,15 @@ const ( PathMax = 0x400 ) +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 211f6419344..00525e7b029 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -108,6 +108,15 @@ const ( PathMax = 0x400 ) +const ( + FADV_NORMAL = 0x0 + FADV_RANDOM = 0x1 + FADV_SEQUENTIAL = 0x2 + FADV_WILLNEED = 0x3 + FADV_DONTNEED = 0x4 + FADV_NOREUSE = 0x5 +) + type RawSockaddrInet4 struct { Len uint8 Family uint8 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index d53141085ac..5a9c8184859 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -74,30 +74,30 @@ const ( ) type Stat_t struct { - Mode uint32 - Dev int32 - Ino uint64 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev int32 - Atim Timespec - Mtim Timespec - Ctim Timespec - Size int64 - Blocks int64 - Blksize uint32 - Flags uint32 - Gen uint32 - Pad_cgo_0 [4]byte - X__st_birthtim Timespec + Mode uint32 + Dev int32 + Ino uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev int32 + Atim Timespec + Mtim Timespec + Ctim Timespec + Size int64 + Blocks int64 + Blksize int32 + Flags uint32 + Gen uint32 + _ [4]byte + _ Timespec } type Statfs_t struct { F_flags uint32 F_bsize uint32 F_iosize uint32 - Pad_cgo_0 [4]byte + _ [4]byte F_blocks uint64 F_bfree uint64 F_bavail int64 @@ -116,7 +116,7 @@ type Statfs_t struct { F_mntonname [90]int8 F_mntfromname [90]int8 F_mntfromspec [90]int8 - Pad_cgo_1 [2]byte + _ [2]byte Mount_info [160]byte } @@ -129,13 +129,13 @@ type Flock_t struct { } type Dirent struct { - Fileno uint64 - Off int64 - Reclen uint16 - Type uint8 - Namlen uint8 - X__d_padding [4]uint8 - Name [256]int8 + Fileno uint64 + Off int64 + Reclen uint16 + Type uint8 + Namlen uint8 + _ [4]uint8 + Name [256]int8 } type Fsid struct { @@ -216,10 +216,10 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen uint32 - Pad_cgo_1 [4]byte + _ [4]byte Control *byte Controllen uint32 Flags int32 @@ -281,8 +281,8 @@ type FdSet struct { } const ( - SizeofIfMsghdr = 0xf8 - SizeofIfData = 0xe0 + SizeofIfMsghdr = 0xa8 + SizeofIfData = 0x90 SizeofIfaMsghdr = 0x18 SizeofIfAnnounceMsghdr = 0x1a SizeofRtMsghdr = 0x60 @@ -311,7 +311,7 @@ type IfData struct { Link_state uint8 Mtu uint32 Metric uint32 - Pad uint32 + Rdomain uint32 Baudrate uint64 Ipackets uint64 Ierrors uint64 @@ -323,12 +323,11 @@ type IfData struct { Imcasts uint64 Omcasts uint64 Iqdrops uint64 + Oqdrops uint64 Noproto uint64 Capabilities uint32 - Pad_cgo_0 [4]byte + _ [4]byte Lastchange Timeval - Mclpool [7]Mclpool - Pad_cgo_1 [4]byte } type IfaMsghdr struct { @@ -389,13 +388,7 @@ type RtMetrics struct { Pad uint32 } -type Mclpool struct { - Grown int32 - Alive uint16 - Hwm uint16 - Cwm uint16 - Lwm uint16 -} +type Mclpool struct{} const ( SizeofBpfVersion = 0x4 @@ -416,9 +409,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - 
Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -429,11 +422,11 @@ type BpfInsn struct { } type BpfHdr struct { - Tstamp BpfTimeval - Caplen uint32 - Datalen uint32 - Hdrlen uint16 - Pad_cgo_0 [2]byte + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type BpfTimeval struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index d4454524867..2248598d03f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -93,40 +93,40 @@ const ( ) type Stat_t struct { - Dev uint64 - Ino uint64 - Mode uint32 - Nlink uint32 - Uid uint32 - Gid uint32 - Rdev uint64 - Size int64 - Atim Timespec - Mtim Timespec - Ctim Timespec - Blksize int32 - Pad_cgo_0 [4]byte - Blocks int64 - Fstype [16]int8 + Dev uint64 + Ino uint64 + Mode uint32 + Nlink uint32 + Uid uint32 + Gid uint32 + Rdev uint64 + Size int64 + Atim Timespec + Mtim Timespec + Ctim Timespec + Blksize int32 + _ [4]byte + Blocks int64 + Fstype [16]int8 } type Flock_t struct { - Type int16 - Whence int16 - Pad_cgo_0 [4]byte - Start int64 - Len int64 - Sysid int32 - Pid int32 - Pad [4]int64 + Type int16 + Whence int16 + _ [4]byte + Start int64 + Len int64 + Sysid int32 + Pid int32 + Pad [4]int64 } type Dirent struct { - Ino uint64 - Off int64 - Reclen uint16 - Name [1]int8 - Pad_cgo_0 [5]byte + Ino uint64 + Off int64 + Reclen uint16 + Name [1]int8 + _ [5]byte } type _Fsblkcnt_t uint64 @@ -213,13 +213,13 @@ type IPv6Mreq struct { type Msghdr struct { Name *byte Namelen uint32 - Pad_cgo_0 [4]byte + _ [4]byte Iov *Iovec Iovlen int32 - Pad_cgo_1 [4]byte + _ [4]byte Accrights *int8 Accrightslen int32 - Pad_cgo_2 [4]byte + _ [4]byte } type Cmsghdr struct { @@ -271,11 +271,11 @@ type Utsname struct { } type Ustat_t struct { - Tfree int64 - Tinode uint64 - Fname [6]int8 - Fpack [6]int8 - Pad_cgo_0 [4]byte + Tfree int64 + Tinode uint64 + Fname [6]int8 + Fpack [6]int8 + _ [4]byte } const ( @@ -295,21 +295,21 @@ const ( ) type IfMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Data IfData + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Data IfData } type IfData struct { Type uint8 Addrlen uint8 Hdrlen uint8 - Pad_cgo_0 [1]byte + _ [1]byte Mtu uint32 Metric uint32 Baudrate uint32 @@ -328,30 +328,30 @@ type IfData struct { } type IfaMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Addrs int32 - Flags int32 - Index uint16 - Pad_cgo_0 [2]byte - Metric int32 + Msglen uint16 + Version uint8 + Type uint8 + Addrs int32 + Flags int32 + Index uint16 + _ [2]byte + Metric int32 } type RtMsghdr struct { - Msglen uint16 - Version uint8 - Type uint8 - Index uint16 - Pad_cgo_0 [2]byte - Flags int32 - Addrs int32 - Pid int32 - Seq int32 - Errno int32 - Use int32 - Inits uint32 - Rmx RtMetrics + Msglen uint16 + Version uint8 + Type uint8 + Index uint16 + _ [2]byte + Flags int32 + Addrs int32 + Pid int32 + Seq int32 + Errno int32 + Use int32 + Inits uint32 + Rmx RtMetrics } type RtMetrics struct { @@ -388,9 +388,9 @@ type BpfStat struct { } type BpfProgram struct { - Len uint32 - Pad_cgo_0 [4]byte - Insns *BpfInsn + Len uint32 + _ [4]byte + Insns *BpfInsn } type BpfInsn struct { @@ -406,30 +406,30 @@ type BpfTimeval struct { } type BpfHdr struct { - Tstamp BpfTimeval - Caplen uint32 - Datalen uint32 - Hdrlen 
uint16 - Pad_cgo_0 [2]byte + Tstamp BpfTimeval + Caplen uint32 + Datalen uint32 + Hdrlen uint16 + _ [2]byte } type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [19]uint8 - Pad_cgo_0 [1]byte + Iflag uint32 + Oflag uint32 + Cflag uint32 + Lflag uint32 + Cc [19]uint8 + _ [1]byte } type Termio struct { - Iflag uint16 - Oflag uint16 - Cflag uint16 - Lflag uint16 - Line int8 - Cc [8]uint8 - Pad_cgo_0 [1]byte + Iflag uint16 + Oflag uint16 + Cflag uint16 + Lflag uint16 + Line int8 + Cc [8]uint8 + _ [1]byte } type Winsize struct { diff --git a/vendor/golang.org/x/sys/windows/asm_windows_386.s b/vendor/golang.org/x/sys/windows/asm_windows_386.s index 1c20dd2f897..21d994d318a 100644 --- a/vendor/golang.org/x/sys/windows/asm_windows_386.s +++ b/vendor/golang.org/x/sys/windows/asm_windows_386.s @@ -6,8 +6,8 @@ // System calls for 386, Windows are implemented in runtime/syscall_windows.goc // -TEXT ·getprocaddress(SB), 7, $0-8 +TEXT ·getprocaddress(SB), 7, $0-16 JMP syscall·getprocaddress(SB) -TEXT ·loadlibrary(SB), 7, $0-4 +TEXT ·loadlibrary(SB), 7, $0-12 JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s index 4d025ab556d..5bfdf797414 100644 --- a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s +++ b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s @@ -9,5 +9,5 @@ TEXT ·getprocaddress(SB), 7, $0-32 JMP syscall·getprocaddress(SB) -TEXT ·loadlibrary(SB), 7, $0-8 +TEXT ·loadlibrary(SB), 7, $0-24 JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index a500dd7dfcf..24aa90bbbe1 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -162,3 +162,4 @@ type ENUM_SERVICE_STATUS_PROCESS struct { //sys ChangeServiceConfig2(service Handle, infoLevel uint32, info *byte) (err error) = advapi32.ChangeServiceConfig2W //sys QueryServiceConfig2(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceConfig2W //sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW +//sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx diff --git a/vendor/golang.org/x/sys/windows/syscall.go b/vendor/golang.org/x/sys/windows/syscall.go index b07bc2305d3..af828a91bcf 100644 --- a/vendor/golang.org/x/sys/windows/syscall.go +++ b/vendor/golang.org/x/sys/windows/syscall.go @@ -11,11 +11,14 @@ // system, set $GOOS and $GOARCH to the desired system. For example, if // you want to view documentation for freebsd/arm on linux/amd64, set $GOOS // to freebsd and $GOARCH to arm. +// // The primary use of this package is inside other packages that provide a more // portable interface to the system, such as "os", "time" and "net". Use // those packages rather than this one if you can. +// // For details of the functions and data types in this package consult // the manuals for the appropriate operating system. +// // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.Errno. 
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index c7b3b15eadb..318c61634e1 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -65,6 +65,7 @@ var ( procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") procGetLastError = modkernel32.NewProc("GetLastError") procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") @@ -472,6 +473,18 @@ func EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serv return } +func QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procQueryServiceStatusEx.Addr(), 5, uintptr(service), uintptr(infoLevel), uintptr(unsafe.Pointer(buff)), uintptr(buffSize), uintptr(unsafe.Pointer(bytesNeeded)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetLastError() (lasterr error) { r0, _, _ := syscall.Syscall(procGetLastError.Addr(), 0, 0, 0, 0) if r0 != 0 { diff --git a/vendor/google.golang.org/genproto/LICENSE b/vendor/google.golang.org/genproto/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/google.golang.org/genproto/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go new file mode 100644 index 00000000000..fb104e6dfb8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/rpc/status/status.pb.go @@ -0,0 +1,156 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/rpc/status.proto + +package status // import "google.golang.org/genproto/googleapis/rpc/status" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import any "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The `Status` type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. It is used by +// [gRPC](https://github.com/grpc). The error model is designed to be: +// +// - Simple to use and understand for most users +// - Flexible enough to meet unexpected needs +// +// # Overview +// +// The `Status` message contains three pieces of data: error code, error message, +// and error details. The error code should be an enum value of +// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes if needed. The +// error message should be a developer-facing English message that helps +// developers *understand* and *resolve* the error. If a localized user-facing +// error message is needed, put the localized message in the error details or +// localize it in the client. The optional error details may contain arbitrary +// information about the error. There is a predefined set of error detail types +// in the package `google.rpc` that can be used for common error conditions. +// +// # Language mapping +// +// The `Status` message is the logical representation of the error model, but it +// is not necessarily the actual wire format. When the `Status` message is +// exposed in different client libraries and different wire protocols, it can be +// mapped differently. For example, it will likely be mapped to some exceptions +// in Java, but more likely mapped to some error codes in C. +// +// # Other uses +// +// The error model and the `Status` message can be used in a variety of +// environments, either with or without APIs, to provide a +// consistent developer experience across different environments. +// +// Example uses of this error model include: +// +// - Partial errors. If a service needs to return partial errors to the client, +// it may embed the `Status` in the normal response to indicate the partial +// errors. +// +// - Workflow errors. A typical workflow has multiple steps. Each step may +// have a `Status` message for error reporting. +// +// - Batch operations. If a client uses batch request and batch response, the +// `Status` message should be used directly inside batch response, one for +// each error sub-response. +// +// - Asynchronous operations. If an API call embeds asynchronous operation +// results in its response, the status of those operations should be +// represented directly using the `Status` message. +// +// - Logging. If some API errors are stored in logs, the message `Status` could +// be used directly after any stripping needed for security/privacy reasons. +type Status struct { + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. + Code int32 `protobuf:"varint,1,opt,name=code" json:"code,omitempty"` + // A developer-facing error message, which should be in English. Any + // user-facing error message should be localized and sent in the + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. + Message string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` + // A list of messages that carry the error details. There is a common set of + // message types for APIs to use. 
+ Details []*any.Any `protobuf:"bytes,3,rep,name=details" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_status_c656c685916bdf47, []int{0} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Status.Unmarshal(m, b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Status.Marshal(b, m, deterministic) +} +func (dst *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(dst, src) +} +func (m *Status) XXX_Size() int { + return xxx_messageInfo_Status.Size(m) +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *Status) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Status) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "google.rpc.Status") +} + +func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_status_c656c685916bdf47) } + +var fileDescriptor_status_c656c685916bdf47 = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, + 0x65, 0x92, 0x4a, 0xd3, 0xf4, 0x13, 0xf3, 0x2a, 0x21, 0xca, 0x94, 0xd2, 0xb8, 0xd8, 0x82, 0xc1, + 0xda, 0x84, 0x84, 0xb8, 0x58, 0x92, 0xf3, 0x53, 0x52, 0x25, 0x18, 0x15, 0x18, 0x35, 0x58, 0x83, + 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, + 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, + 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0x38, 0x2e, 0xbe, 0xe4, 0xfc, 0x5c, 0x3d, 0x84, 0xa3, 0x9c, + 0xb8, 0x21, 0xf6, 0x06, 0x80, 0x94, 0x07, 0x30, 0x46, 0x99, 0x43, 0xa5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0xd3, 0x53, 0xf3, 0xc0, 0x86, 0xe9, 0x43, 0xa4, 0x12, + 0x0b, 0x32, 0x8b, 0x91, 0xfc, 0x69, 0x0d, 0xa1, 0x16, 0x31, 0x31, 0x07, 0x05, 0x38, 0x27, 0xb1, + 0x81, 0x55, 0x1a, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xa4, 0x53, 0xf0, 0x7c, 0x10, 0x01, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/grpc/.travis.yml b/vendor/google.golang.org/grpc/.travis.yml new file mode 100644 index 00000000000..3c2621ab750 --- /dev/null +++ b/vendor/google.golang.org/grpc/.travis.yml @@ -0,0 +1,24 @@ +language: go + +go: + - 1.6.x + - 1.7.x + - 1.8.x + - 1.9.x + - 1.10.x + +matrix: + include: + - go: 1.10.x + env: RUN386=1 + +go_import_path: google.golang.org/grpc + +before_install: + - if [[ -n "$RUN386" ]]; then export GOARCH=386; fi + - if [[ "$TRAVIS_GO_VERSION" = 1.10* && "$GOARCH" != "386" ]]; then ./vet.sh -install || exit 1; fi + +script: + - if [[ "$TRAVIS_GO_VERSION" = 1.10* && "$GOARCH" != "386" ]]; then ./vet.sh || exit 1; fi + 
- make test || exit 1 + - if [[ "$GOARCH" != "386" ]]; then make testrace; fi diff --git a/vendor/google.golang.org/grpc/AUTHORS b/vendor/google.golang.org/grpc/AUTHORS new file mode 100644 index 00000000000..e491a9e7f78 --- /dev/null +++ b/vendor/google.golang.org/grpc/AUTHORS @@ -0,0 +1 @@ +Google Inc. diff --git a/vendor/google.golang.org/grpc/CONTRIBUTING.md b/vendor/google.golang.org/grpc/CONTRIBUTING.md new file mode 100644 index 00000000000..0863eb26b60 --- /dev/null +++ b/vendor/google.golang.org/grpc/CONTRIBUTING.md @@ -0,0 +1,36 @@ +# How to contribute + +We definitely welcome your patches and contributions to gRPC! + +If you are new to github, please start by reading [Pull Request howto](https://help.github.com/articles/about-pull-requests/) + +## Legal requirements + +In order to protect both you and ourselves, you will need to sign the +[Contributor License Agreement](https://identity.linuxfoundation.org/projects/cncf). + +## Guidelines for Pull Requests +How to get your contributions merged smoothly and quickly. + +- Create **small PRs** that are narrowly focused on **addressing a single concern**. We often times receive PRs that are trying to fix several things at a time, but only one fix is considered acceptable, nothing gets merged and both author's & review's time is wasted. Create more PRs to address different concerns and everyone will be happy. + +- For speculative changes, consider opening an issue and discussing it first. If you are suggesting a behavioral or API change, consider starting with a [gRFC proposal](https://github.com/grpc/proposal). + +- Provide a good **PR description** as a record of **what** change is being made and **why** it was made. Link to a github issue if it exists. + +- Don't fix code style and formatting unless you are already changing that line to address an issue. PRs with irrelevant changes won't be merged. If you do want to fix formatting or style, do that in a separate PR. + +- Unless your PR is trivial, you should expect there will be reviewer comments that you'll need to address before merging. We expect you to be reasonably responsive to those comments, otherwise the PR will be closed after 2-3 weeks of inactivity. + +- Maintain **clean commit history** and use **meaningful commit messages**. PRs with messy commit history are difficult to review and won't be merged. Use `rebase -i upstream/master` to curate your commit history and/or to bring in latest changes from master (but avoid rebasing in the middle of a code review). + +- Keep your PR up to date with upstream/master (if there are merge conflicts, we can't really merge your change). + +- **All tests need to be passing** before your change can be merged. We recommend you **run tests locally** before creating your PR to catch breakages early on. + - `make all` to test everything, OR + - `make vet` to catch vet errors + - `make test` to run the tests + - `make testrace` to run tests in race mode + +- Exceptions to the rules can be made if there's a compelling reason for doing so. + diff --git a/vendor/google.golang.org/grpc/LICENSE b/vendor/google.golang.org/grpc/LICENSE new file mode 100644 index 00000000000..d6456956733 --- /dev/null +++ b/vendor/google.golang.org/grpc/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/google.golang.org/grpc/Makefile b/vendor/google.golang.org/grpc/Makefile new file mode 100644 index 00000000000..6f393a808df --- /dev/null +++ b/vendor/google.golang.org/grpc/Makefile @@ -0,0 +1,48 @@ +all: vet test testrace + +deps: + go get -d -v google.golang.org/grpc/... + +updatedeps: + go get -d -v -u -f google.golang.org/grpc/... + +testdeps: + go get -d -v -t google.golang.org/grpc/... + +updatetestdeps: + go get -d -v -t -u -f google.golang.org/grpc/... + +build: deps + go build google.golang.org/grpc/... + +proto: + @ if ! which protoc > /dev/null; then \ + echo "error: protoc not installed" >&2; \ + exit 1; \ + fi + go generate google.golang.org/grpc/... + +vet: + ./vet.sh + +test: testdeps + go test -cpu 1,4 -timeout 5m google.golang.org/grpc/... + +testrace: testdeps + go test -race -cpu 1,4 -timeout 7m google.golang.org/grpc/... + +clean: + go clean -i google.golang.org/grpc/... + +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + proto \ + vet \ + test \ + testrace \ + clean diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md new file mode 100644 index 00000000000..789adfd6536 --- /dev/null +++ b/vendor/google.golang.org/grpc/README.md @@ -0,0 +1,45 @@ +# gRPC-Go + +[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://godoc.org/google.golang.org/grpc?status.svg)](https://godoc.org/google.golang.org/grpc) [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) + +The Go implementation of [gRPC](https://grpc.io/): A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. For more information see the [gRPC Quick Start: Go](https://grpc.io/docs/quickstart/go.html) guide. + +Installation +------------ + +To install this package, you need to install Go and setup your Go workspace on your computer. The simplest way to install the library is to run: + +``` +$ go get -u google.golang.org/grpc +``` + +Prerequisites +------------- + +This requires Go 1.6 or later. Go 1.7 will be required soon. + +Constraints +----------- +The grpc package should only depend on standard Go packages and a small number of exceptions. If your contribution introduces new dependencies which are NOT in the [list](http://godoc.org/google.golang.org/grpc?imports), you need a discussion with gRPC-Go authors and consultants. + +Documentation +------------- +See [API documentation](https://godoc.org/google.golang.org/grpc) for package and API descriptions and find examples in the [examples directory](examples/). + +Performance +----------- +See the current benchmarks for some of the languages supported in [this dashboard](https://performance-dot-grpc-testing.appspot.com/explore?dashboard=5652536396611584&widget=490377658&container=1286539696). + +Status +------ +General Availability [Google Cloud Platform Launch Stages](https://cloud.google.com/terms/launch-stages). + +FAQ +--- + +#### Compiling error, undefined: grpc.SupportPackageIsVersion + +Please update proto package, gRPC package and rebuild the proto files: + - `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}` + - `go get -u google.golang.org/grpc` + - `protoc --go_out=plugins=grpc:. 
*.proto` diff --git a/vendor/google.golang.org/grpc/backoff.go b/vendor/google.golang.org/grpc/backoff.go new file mode 100644 index 00000000000..c40facce510 --- /dev/null +++ b/vendor/google.golang.org/grpc/backoff.go @@ -0,0 +1,96 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "math/rand" + "time" +) + +// DefaultBackoffConfig uses values specified for backoff in +// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md. +var DefaultBackoffConfig = BackoffConfig{ + MaxDelay: 120 * time.Second, + baseDelay: 1.0 * time.Second, + factor: 1.6, + jitter: 0.2, +} + +// backoffStrategy defines the methodology for backing off after a grpc +// connection failure. +// +// This is unexported until the gRPC project decides whether or not to allow +// alternative backoff strategies. Once a decision is made, this type and its +// method may be exported. +type backoffStrategy interface { + // backoff returns the amount of time to wait before the next retry given + // the number of consecutive failures. + backoff(retries int) time.Duration +} + +// BackoffConfig defines the parameters for the default gRPC backoff strategy. +type BackoffConfig struct { + // MaxDelay is the upper bound of backoff delay. + MaxDelay time.Duration + + // TODO(stevvooe): The following fields are not exported, as allowing + // changes would violate the current gRPC specification for backoff. If + // gRPC decides to allow more interesting backoff strategies, these fields + // may be opened up in the future. + + // baseDelay is the amount of time to wait before retrying after the first + // failure. + baseDelay time.Duration + + // factor is applied to the backoff after each retry. + factor float64 + + // jitter provides a range to randomize backoff delays. + jitter float64 +} + +func setDefaults(bc *BackoffConfig) { + md := bc.MaxDelay + *bc = DefaultBackoffConfig + + if md > 0 { + bc.MaxDelay = md + } +} + +func (bc BackoffConfig) backoff(retries int) time.Duration { + if retries == 0 { + return bc.baseDelay + } + backoff, max := float64(bc.baseDelay), float64(bc.MaxDelay) + for backoff < max && retries > 0 { + backoff *= bc.factor + retries-- + } + if backoff > max { + backoff = max + } + // Randomize backoff delays so that if a cluster of requests start at + // the same time, they won't operate in lockstep. + backoff *= 1 + bc.jitter*(rand.Float64()*2-1) + if backoff < 0 { + return 0 + } + return time.Duration(backoff) +} diff --git a/vendor/google.golang.org/grpc/balancer.go b/vendor/google.golang.org/grpc/balancer.go new file mode 100644 index 00000000000..e1730166cde --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer.go @@ -0,0 +1,416 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
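As a sketch of how the backoff configuration above might be tuned by an application, assuming this grpc revision exposes the `grpc.WithBackoffConfig` and `grpc.WithInsecure` dial options (only `MaxDelay` is exported; the unexported fields keep the `DefaultBackoffConfig` values); the target address is hypothetical:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Cap the reconnect backoff at 30s instead of the default 120s.
	conn, err := grpc.Dial(
		"example.internal:50051", // hypothetical target
		grpc.WithInsecure(),
		grpc.WithBackoffConfig(grpc.BackoffConfig{MaxDelay: 30 * time.Second}),
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
```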
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "net" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/naming" + "google.golang.org/grpc/status" +) + +// Address represents a server the client connects to. +// +// Deprecated: please use package balancer. +type Address struct { + // Addr is the server address on which a connection will be established. + Addr string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BalancerConfig specifies the configurations for Balancer. +// +// Deprecated: please use package balancer. +type BalancerConfig struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) +} + +// BalancerGetOptions configures a Get call. +// +// Deprecated: please use package balancer. +type BalancerGetOptions struct { + // BlockingWait specifies whether Get should block when there is no + // connected address. + BlockingWait bool +} + +// Balancer chooses network addresses for RPCs. +// +// Deprecated: please use package balancer. +type Balancer interface { + // Start does the initialization work to bootstrap a Balancer. For example, + // this function may start the name resolution and watch the updates. It will + // be called when dialing. + Start(target string, config BalancerConfig) error + // Up informs the Balancer that gRPC has a connection to the server at + // addr. It returns down which is called once the connection to addr gets + // lost or closed. + // TODO: It is not clear how to construct and take advantage of the meaningful error + // parameter for down. Need realistic demands to guide. + Up(addr Address) (down func(error)) + // Get gets the address of a server for the RPC corresponding to ctx. + // i) If it returns a connected address, gRPC internals issues the RPC on the + // connection to this address; + // ii) If it returns an address on which the connection is under construction + // (initiated by Notify(...)) but not connected, gRPC internals + // * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or + // Shutdown state; + // or + // * issues RPC on the connection otherwise. + // iii) If it returns an address on which the connection does not exist, gRPC + // internals treats it as an error and will fail the corresponding RPC. + // + // Therefore, the following is the recommended rule when writing a custom Balancer. 
+ // If opts.BlockingWait is true, it should return a connected address or + // block if there is no connected address. It should respect the timeout or + // cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast + // RPCs), it should return an address it has notified via Notify(...) immediately + // instead of blocking. + // + // The function returns put which is called once the rpc has completed or failed. + // put can collect and report RPC stats to a remote load balancer. + // + // This function should only return the errors Balancer cannot recover by itself. + // gRPC internals will fail the RPC if an error is returned. + Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) + // Notify returns a channel that is used by gRPC internals to watch the addresses + // gRPC needs to connect. The addresses might be from a name resolver or remote + // load balancer. gRPC internals will compare it with the existing connected + // addresses. If the address Balancer notified is not in the existing connected + // addresses, gRPC starts to connect the address. If an address in the existing + // connected addresses is not in the notification list, the corresponding connection + // is shutdown gracefully. Otherwise, there are no operations to take. Note that + // the Address slice must be the full list of the Addresses which should be connected. + // It is NOT delta. + Notify() <-chan []Address + // Close shuts down the balancer. + Close() error +} + +// downErr implements net.Error. It is constructed by gRPC internals and passed to the down +// call of Balancer. +type downErr struct { + timeout bool + temporary bool + desc string +} + +func (e downErr) Error() string { return e.desc } +func (e downErr) Timeout() bool { return e.timeout } +func (e downErr) Temporary() bool { return e.temporary } + +func downErrorf(timeout, temporary bool, format string, a ...interface{}) downErr { + return downErr{ + timeout: timeout, + temporary: temporary, + desc: fmt.Sprintf(format, a...), + } +} + +// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch +// the name resolution updates and updates the addresses available correspondingly. +// +// Deprecated: please use package balancer/roundrobin. +func RoundRobin(r naming.Resolver) Balancer { + return &roundRobin{r: r} +} + +type addrInfo struct { + addr Address + connected bool +} + +type roundRobin struct { + r naming.Resolver + w naming.Watcher + addrs []*addrInfo // all the addresses the client should potentially connect + mu sync.Mutex + addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to. + next int // index of the next address to return for Get() + waitCh chan struct{} // the channel to block when there is no connected address available + done bool // The Balancer is closed. 
+} + +func (rr *roundRobin) watchAddrUpdates() error { + updates, err := rr.w.Next() + if err != nil { + grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err) + return err + } + rr.mu.Lock() + defer rr.mu.Unlock() + for _, update := range updates { + addr := Address{ + Addr: update.Addr, + Metadata: update.Metadata, + } + switch update.Op { + case naming.Add: + var exist bool + for _, v := range rr.addrs { + if addr == v.addr { + exist = true + grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr) + break + } + } + if exist { + continue + } + rr.addrs = append(rr.addrs, &addrInfo{addr: addr}) + case naming.Delete: + for i, v := range rr.addrs { + if addr == v.addr { + copy(rr.addrs[i:], rr.addrs[i+1:]) + rr.addrs = rr.addrs[:len(rr.addrs)-1] + break + } + } + default: + grpclog.Errorln("Unknown update.Op ", update.Op) + } + } + // Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified. + open := make([]Address, len(rr.addrs)) + for i, v := range rr.addrs { + open[i] = v.addr + } + if rr.done { + return ErrClientConnClosing + } + select { + case <-rr.addrCh: + default: + } + rr.addrCh <- open + return nil +} + +func (rr *roundRobin) Start(target string, config BalancerConfig) error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return ErrClientConnClosing + } + if rr.r == nil { + // If there is no name resolver installed, it is not needed to + // do name resolution. In this case, target is added into rr.addrs + // as the only address available and rr.addrCh stays nil. + rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}}) + return nil + } + w, err := rr.r.Resolve(target) + if err != nil { + return err + } + rr.w = w + rr.addrCh = make(chan []Address, 1) + go func() { + for { + if err := rr.watchAddrUpdates(); err != nil { + return + } + } + }() + return nil +} + +// Up sets the connected state of addr and sends notification if there are pending +// Get() calls. +func (rr *roundRobin) Up(addr Address) func(error) { + rr.mu.Lock() + defer rr.mu.Unlock() + var cnt int + for _, a := range rr.addrs { + if a.addr == addr { + if a.connected { + return nil + } + a.connected = true + } + if a.connected { + cnt++ + } + } + // addr is only one which is connected. Notify the Get() callers who are blocking. + if cnt == 1 && rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + return func(err error) { + rr.down(addr, err) + } +} + +// down unsets the connected state of addr. +func (rr *roundRobin) down(addr Address, err error) { + rr.mu.Lock() + defer rr.mu.Unlock() + for _, a := range rr.addrs { + if addr == a.addr { + a.connected = false + break + } + } +} + +// Get returns the next addr in the rotation. +func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) { + var ch chan struct{} + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + if !opts.BlockingWait { + if len(rr.addrs) == 0 { + rr.mu.Unlock() + err = status.Errorf(codes.Unavailable, "there is no address available") + return + } + // Returns the next addr on rr.addrs for failfast RPCs. 
+ addr = rr.addrs[rr.next].addr + rr.next++ + rr.mu.Unlock() + return + } + // Wait on rr.waitCh for non-failfast RPCs. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + for { + select { + case <-ctx.Done(): + err = ctx.Err() + return + case <-ch: + rr.mu.Lock() + if rr.done { + rr.mu.Unlock() + err = ErrClientConnClosing + return + } + + if len(rr.addrs) > 0 { + if rr.next >= len(rr.addrs) { + rr.next = 0 + } + next := rr.next + for { + a := rr.addrs[next] + next = (next + 1) % len(rr.addrs) + if a.connected { + addr = a.addr + rr.next = next + rr.mu.Unlock() + return + } + if next == rr.next { + // Has iterated all the possible address but none is connected. + break + } + } + } + // The newly added addr got removed by Down() again. + if rr.waitCh == nil { + ch = make(chan struct{}) + rr.waitCh = ch + } else { + ch = rr.waitCh + } + rr.mu.Unlock() + } + } +} + +func (rr *roundRobin) Notify() <-chan []Address { + return rr.addrCh +} + +func (rr *roundRobin) Close() error { + rr.mu.Lock() + defer rr.mu.Unlock() + if rr.done { + return errBalancerClosed + } + rr.done = true + if rr.w != nil { + rr.w.Close() + } + if rr.waitCh != nil { + close(rr.waitCh) + rr.waitCh = nil + } + if rr.addrCh != nil { + close(rr.addrCh) + } + return nil +} + +// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn. +// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get() +// returns the only address Up by resetTransport(). +type pickFirst struct { + *roundRobin +} + +func pickFirstBalancerV1(r naming.Resolver) Balancer { + return &pickFirst{&roundRobin{r: r}} +} diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go new file mode 100644 index 00000000000..63b8d71371e --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -0,0 +1,228 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package balancer defines APIs for load balancing in gRPC. +// All APIs in this package are experimental. +package balancer + +import ( + "errors" + "net" + "strings" + + "golang.org/x/net/context" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/resolver" +) + +var ( + // m is a map from name to balancer builder. + m = make(map[string]Builder) +) + +// Register registers the balancer builder to the balancer map. b.Name +// (lowercased) will be used as the name registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Balancers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[strings.ToLower(b.Name())] = b +} + +// Get returns the resolver builder registered with the given name. 
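For the deprecated v1 `Balancer` above, typical wiring looks roughly like the following, assuming `grpc.WithBalancer` and `naming.NewDNSResolverWithFreq` are available in this grpc revision; the service name is hypothetical:

```go
package main

import (
	"log"
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/naming"
)

func main() {
	// Watch DNS for address updates and feed them to the round-robin balancer.
	r, err := naming.NewDNSResolverWithFreq(30 * time.Second)
	if err != nil {
		log.Fatalf("resolver: %v", err)
	}
	conn, err := grpc.Dial(
		"my-service.internal:50051", // hypothetical DNS name
		grpc.WithInsecure(),
		grpc.WithBalancer(grpc.RoundRobin(r)), // v1 API; superseded by package balancer
	)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```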
+// Note that the compare is done in a case-insenstive fashion. +// If no builder is register with the name, nil will be returned. +func Get(name string) Builder { + if b, ok := m[strings.ToLower(name)]; ok { + return b + } + return nil +} + +// SubConn represents a gRPC sub connection. +// Each sub connection contains a list of addresses. gRPC will +// try to connect to them (in sequence), and stop trying the +// remainder once one connection is successful. +// +// The reconnect backoff will be applied on the list, not a single address. +// For example, try_on_all_addresses -> backoff -> try_on_all_addresses. +// +// All SubConns start in IDLE, and will not try to connect. To trigger +// the connecting, Balancers must call Connect. +// When the connection encounters an error, it will reconnect immediately. +// When the connection becomes IDLE, it will not reconnect unless Connect is +// called. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type SubConn interface { + // UpdateAddresses updates the addresses used in this SubConn. + // gRPC checks if currently-connected address is still in the new list. + // If it's in the list, the connection will be kept. + // If it's not in the list, the connection will gracefully closed, and + // a new connection will be created. + // + // This will trigger a state transition for the SubConn. + UpdateAddresses([]resolver.Address) + // Connect starts the connecting for this SubConn. + Connect() +} + +// NewSubConnOptions contains options to create new SubConn. +type NewSubConnOptions struct{} + +// ClientConn represents a gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewSubConn is called by balancer to create a new SubConn. + // It doesn't block and wait for the connections to be established. + // Behaviors of the SubConn can be controlled by options. + NewSubConn([]resolver.Address, NewSubConnOptions) (SubConn, error) + // RemoveSubConn removes the SubConn from ClientConn. + // The SubConn will be shutdown. + RemoveSubConn(SubConn) + + // UpdateBalancerState is called by balancer to nofity gRPC that some internal + // state in balancer has changed. + // + // gRPC will update the connectivity state of the ClientConn, and will call pick + // on the new picker to pick new SubConn. + UpdateBalancerState(s connectivity.State, p Picker) + + // ResolveNow is called by balancer to notify gRPC to do a name resolving. + ResolveNow(resolver.ResolveNowOption) + + // Target returns the dial target for this ClientConn. + Target() string +} + +// BuildOptions contains additional information for Build. +type BuildOptions struct { + // DialCreds is the transport credential the Balancer implementation can + // use to dial to a remote load balancer server. The Balancer implementations + // can ignore this if it does not need to talk to another party securely. + DialCreds credentials.TransportCredentials + // Dialer is the custom dialer the Balancer implementation can use to dial + // to a remote load balancer server. 
The Balancer implementations + // can ignore this if it doesn't need to talk to remote balancer. + Dialer func(context.Context, string) (net.Conn, error) + // ChannelzParentID is the entity parent's channelz unique identification number. + ChannelzParentID int64 +} + +// Builder creates a balancer. +type Builder interface { + // Build creates a new balancer with the ClientConn. + Build(cc ClientConn, opts BuildOptions) Balancer + // Name returns the name of balancers built by this builder. + // It will be used to pick balancers (for example in service config). + Name() string +} + +// PickOptions contains addition information for the Pick operation. +type PickOptions struct{} + +// DoneInfo contains additional information for done. +type DoneInfo struct { + // Err is the rpc error the RPC finished with. It could be nil. + Err error + // BytesSent indicates if any bytes have been sent to the server. + BytesSent bool + // BytesReceived indicates if any byte has been received from the server. + BytesReceived bool +} + +var ( + // ErrNoSubConnAvailable indicates no SubConn is available for pick(). + // gRPC will block the RPC until a new picker is available via UpdateBalancerState(). + ErrNoSubConnAvailable = errors.New("no SubConn is available") + // ErrTransientFailure indicates all SubConns are in TransientFailure. + // WaitForReady RPCs will block, non-WaitForReady RPCs will fail. + ErrTransientFailure = errors.New("all SubConns are in TransientFailure") +) + +// Picker is used by gRPC to pick a SubConn to send an RPC. +// Balancer is expected to generate a new picker from its snapshot every time its +// internal state has changed. +// +// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState(). +type Picker interface { + // Pick returns the SubConn to be used to send the RPC. + // The returned SubConn must be one returned by NewSubConn(). + // + // This functions is expected to return: + // - a SubConn that is known to be READY; + // - ErrNoSubConnAvailable if no SubConn is available, but progress is being + // made (for example, some SubConn is in CONNECTING mode); + // - other errors if no active connecting is happening (for example, all SubConn + // are in TRANSIENT_FAILURE mode). + // + // If a SubConn is returned: + // - If it is READY, gRPC will send the RPC on it; + // - If it is not ready, or becomes not ready after it's returned, gRPC will block + // until UpdateBalancerState() is called and will call pick on the new picker. + // + // If the returned error is not nil: + // - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState() + // - If the error is ErrTransientFailure: + // - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState() + // is called to pick again; + // - Otherwise, RPC will fail with unavailable error. + // - Else (error is other non-nil error): + // - The RPC will fail with unavailable error. + // + // The returned done() function will be called once the rpc has finished, with the + // final status of that RPC. + // done may be nil if balancer doesn't care about the RPC status. + Pick(ctx context.Context, opts PickOptions) (conn SubConn, done func(DoneInfo), err error) +} + +// Balancer takes input from gRPC, manages SubConns, and collects and aggregates +// the connectivity states. +// +// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs. 
+// +// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed +// to be called synchronously from the same goroutine. +// There's no guarantee on picker.Pick, it may be called anytime. +type Balancer interface { + // HandleSubConnStateChange is called by gRPC when the connectivity state + // of sc has changed. + // Balancer is expected to aggregate all the state of SubConn and report + // that back to gRPC. + // Balancer should also generate and update Pickers when its internal state has + // been changed by the new state. + HandleSubConnStateChange(sc SubConn, state connectivity.State) + // HandleResolvedAddrs is called by gRPC to send updated resolved addresses to + // balancers. + // Balancer can create new SubConn or remove SubConn with the addresses. + // An empty address slice and a non-nil error will be passed if the resolver returns + // non-nil error to gRPC. + HandleResolvedAddrs([]resolver.Address, error) + // Close closes the balancer. The balancer is not required to call + // ClientConn.RemoveSubConn for its existing SubConns. + Close() +} diff --git a/vendor/google.golang.org/grpc/balancer/base/balancer.go b/vendor/google.golang.org/grpc/balancer/base/balancer.go new file mode 100644 index 00000000000..23d13511bb2 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/balancer.go @@ -0,0 +1,208 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package base + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +type baseBuilder struct { + name string + pickerBuilder PickerBuilder +} + +func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &baseBalancer{ + cc: cc, + pickerBuilder: bb.pickerBuilder, + + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + csEvltr: &connectivityStateEvaluator{}, + // Initialize picker to a picker that always return + // ErrNoSubConnAvailable, because when state of a SubConn changes, we + // may call UpdateBalancerState with this picker. + picker: NewErrPicker(balancer.ErrNoSubConnAvailable), + } +} + +func (bb *baseBuilder) Name() string { + return bb.name +} + +type baseBalancer struct { + cc balancer.ClientConn + pickerBuilder PickerBuilder + + csEvltr *connectivityStateEvaluator + state connectivity.State + + subConns map[resolver.Address]balancer.SubConn + scStates map[balancer.SubConn]connectivity.State + picker balancer.Picker +} + +func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("base.baseBalancer: HandleResolvedAddrs called with error %v", err) + return + } + grpclog.Infoln("base.baseBalancer: got new resolved addresses: ", addrs) + // addrsSet is the set converted from addrs, it's used for quick lookup of an address. 
+ addrsSet := make(map[resolver.Address]struct{}) + for _, a := range addrs { + addrsSet[a] = struct{}{} + if _, ok := b.subConns[a]; !ok { + // a is a new address (not existing in b.subConns). + sc, err := b.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("base.baseBalancer: failed to create new SubConn: %v", err) + continue + } + b.subConns[a] = sc + b.scStates[sc] = connectivity.Idle + sc.Connect() + } + } + for a, sc := range b.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + b.cc.RemoveSubConn(sc) + delete(b.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. + } + } +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker +// from it. The picker is +// - errPicker with ErrTransientFailure if the balancer is in TransientFailure, +// - built by the pickerBuilder with all READY SubConns otherwise. +func (b *baseBalancer) regeneratePicker() { + if b.state == connectivity.TransientFailure { + b.picker = NewErrPicker(balancer.ErrTransientFailure) + return + } + readySCs := make(map[resolver.Address]balancer.SubConn) + + // Filter out all ready SCs from full subConn map. + for addr, sc := range b.subConns { + if st, ok := b.scStates[sc]; ok && st == connectivity.Ready { + readySCs[addr] = sc + } + } + b.picker = b.pickerBuilder.Build(readySCs) +} + +func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("base.baseBalancer: handle SubConn state change: %p, %v", sc, s) + oldS, ok := b.scStates[sc] + if !ok { + grpclog.Infof("base.baseBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + b.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(b.scStates, sc) + } + + oldAggrState := b.state + b.state = b.csEvltr.recordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (s == connectivity.Ready) != (oldS == connectivity.Ready) || + (b.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + b.regeneratePicker() + } + + b.cc.UpdateBalancerState(b.state, b.picker) +} + +// Close is a nop because base balancer doesn't have internal state to clean up, +// and it doesn't need to call RemoveSubConn for the SubConns. +func (b *baseBalancer) Close() { +} + +// NewErrPicker returns a picker that always returns err on Pick(). +func NewErrPicker(err error) balancer.Picker { + return &errPicker{err: err} +} + +type errPicker struct { + err error // Pick() always returns this err. +} + +func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + numReady uint64 // Number of addrConns in ready state. 
+ numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. +} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// +// recordTransition should only be called synchronously from the same goroutine. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/balancer/base/base.go b/vendor/google.golang.org/grpc/balancer/base/base.go new file mode 100644 index 00000000000..012ace2f2f7 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/base/base.go @@ -0,0 +1,52 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package base defines a balancer base that can be used to build balancers with +// different picking algorithms. +// +// The base balancer creates a new SubConn for each resolved address. The +// provided picker will only be notified about READY SubConns. +// +// This package is the base of round_robin balancer, its purpose is to be used +// to build round_robin like balancers with complex picking algorithms. +// Balancers with more complicated logic should try to implement a balancer +// builder from scratch. +// +// All APIs in this package are experimental. +package base + +import ( + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/resolver" +) + +// PickerBuilder creates balancer.Picker. +type PickerBuilder interface { + // Build takes a slice of ready SubConns, and returns a picker that will be + // used by gRPC to pick a SubConn. + Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker +} + +// NewBalancerBuilder returns a balancer builder. The balancers +// built by this builder will use the picker builder to build pickers. 
+func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder { + return &baseBuilder{ + name: name, + pickerBuilder: pb, + } +} diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go new file mode 100644 index 00000000000..2eda0a1c210 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go @@ -0,0 +1,79 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package roundrobin defines a roundrobin balancer. Roundrobin balancer is +// installed as one of the default balancers in gRPC, users don't need to +// explicitly install this balancer. +package roundrobin + +import ( + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// Name is the name of round_robin balancer. +const Name = "round_robin" + +// newBuilder creates a new roundrobin balancer builder. +func newBuilder() balancer.Builder { + return base.NewBalancerBuilder(Name, &rrPickerBuilder{}) +} + +func init() { + balancer.Register(newBuilder()) +} + +type rrPickerBuilder struct{} + +func (*rrPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker { + grpclog.Infof("roundrobinPicker: newPicker called with readySCs: %v", readySCs) + var scs []balancer.SubConn + for _, sc := range readySCs { + scs = append(scs, sc) + } + return &rrPicker{ + subConns: scs, + } +} + +type rrPicker struct { + // subConns is the snapshot of the roundrobin balancer when this picker was + // created. The slice is immutable. Each Get() will do a round robin + // selection from it and return the selected SubConn. + subConns []balancer.SubConn + + mu sync.Mutex + next int +} + +func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + p.mu.Lock() + sc := p.subConns[p.next] + p.next = (p.next + 1) % len(p.subConns) + p.mu.Unlock() + return sc, nil, nil +} diff --git a/vendor/google.golang.org/grpc/balancer_conn_wrappers.go b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go new file mode 100644 index 00000000000..c23f81706fb --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_conn_wrappers.go @@ -0,0 +1,300 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
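The `round_robin` balancer above is the canonical use of `base.NewBalancerBuilder`; a custom picking policy follows the same pattern. Below is a sketch of a hypothetical "random" balancer built only from the `balancer`, `base` and `resolver` APIs shown in this diff (the package, name and picker are illustrative, not part of gRPC):

```go
package random

import (
	"math/rand"

	"golang.org/x/net/context"
	"google.golang.org/grpc/balancer"
	"google.golang.org/grpc/balancer/base"
	"google.golang.org/grpc/resolver"
)

// Name is the hypothetical name this balancer registers under.
const Name = "random"

func init() {
	// Register a builder whose pickers choose a READY SubConn at random.
	balancer.Register(base.NewBalancerBuilder(Name, &randomPickerBuilder{}))
}

type randomPickerBuilder struct{}

func (*randomPickerBuilder) Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker {
	scs := make([]balancer.SubConn, 0, len(readySCs))
	for _, sc := range readySCs {
		scs = append(scs, sc)
	}
	return &randomPicker{subConns: scs}
}

type randomPicker struct {
	// subConns is an immutable snapshot of the READY connections.
	subConns []balancer.SubConn
}

func (p *randomPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
	if len(p.subConns) == 0 {
		return nil, nil, balancer.ErrNoSubConnAvailable
	}
	return p.subConns[rand.Intn(len(p.subConns))], nil, nil
}
```

A client would then opt in by name, e.g. `grpc.WithBalancerName("random")`, assuming that dial option exists in this grpc revision.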
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// scStateUpdate contains the subConn and the new state it changed to. +type scStateUpdate struct { + sc balancer.SubConn + state connectivity.State +} + +// scStateUpdateBuffer is an unbounded channel for scStateChangeTuple. +// TODO make a general purpose buffer that uses interface{}. +type scStateUpdateBuffer struct { + c chan *scStateUpdate + mu sync.Mutex + backlog []*scStateUpdate +} + +func newSCStateUpdateBuffer() *scStateUpdateBuffer { + return &scStateUpdateBuffer{ + c: make(chan *scStateUpdate, 1), + } +} + +func (b *scStateUpdateBuffer) put(t *scStateUpdate) { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) == 0 { + select { + case b.c <- t: + return + default: + } + } + b.backlog = append(b.backlog, t) +} + +func (b *scStateUpdateBuffer) load() { + b.mu.Lock() + defer b.mu.Unlock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = nil + b.backlog = b.backlog[1:] + default: + } + } +} + +// get returns the channel that the scStateUpdate will be sent to. +// +// Upon receiving, the caller should call load to send another +// scStateChangeTuple onto the channel if there is any. +func (b *scStateUpdateBuffer) get() <-chan *scStateUpdate { + return b.c +} + +// resolverUpdate contains the new resolved addresses or error if there's +// any. +type resolverUpdate struct { + addrs []resolver.Address + err error +} + +// ccBalancerWrapper is a wrapper on top of cc for balancers. +// It implements balancer.ClientConn interface. +type ccBalancerWrapper struct { + cc *ClientConn + balancer balancer.Balancer + stateChangeQueue *scStateUpdateBuffer + resolverUpdateCh chan *resolverUpdate + done chan struct{} + + mu sync.Mutex + subConns map[*acBalancerWrapper]struct{} +} + +func newCCBalancerWrapper(cc *ClientConn, b balancer.Builder, bopts balancer.BuildOptions) *ccBalancerWrapper { + ccb := &ccBalancerWrapper{ + cc: cc, + stateChangeQueue: newSCStateUpdateBuffer(), + resolverUpdateCh: make(chan *resolverUpdate, 1), + done: make(chan struct{}), + subConns: make(map[*acBalancerWrapper]struct{}), + } + go ccb.watcher() + ccb.balancer = b.Build(ccb, bopts) + return ccb +} + +// watcher balancer functions sequentially, so the balancer can be implemented +// lock-free. 
+func (ccb *ccBalancerWrapper) watcher() { + for { + select { + case t := <-ccb.stateChangeQueue.get(): + ccb.stateChangeQueue.load() + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + ccb.balancer.HandleSubConnStateChange(t.sc, t.state) + case t := <-ccb.resolverUpdateCh: + select { + case <-ccb.done: + ccb.balancer.Close() + return + default: + } + ccb.balancer.HandleResolvedAddrs(t.addrs, t.err) + case <-ccb.done: + } + + select { + case <-ccb.done: + ccb.balancer.Close() + ccb.mu.Lock() + scs := ccb.subConns + ccb.subConns = nil + ccb.mu.Unlock() + for acbw := range scs { + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) + } + return + default: + } + } +} + +func (ccb *ccBalancerWrapper) close() { + close(ccb.done) +} + +func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + // When updating addresses for a SubConn, if the address in use is not in + // the new addresses, the old ac will be tearDown() and a new ac will be + // created. tearDown() generates a state change with Shutdown state, we + // don't want the balancer to receive this state change. So before + // tearDown() on the old ac, ac.acbw (acWrapper) will be set to nil, and + // this function will be called with (nil, Shutdown). We don't need to call + // balancer method in this case. + if sc == nil { + return + } + ccb.stateChangeQueue.put(&scStateUpdate{ + sc: sc, + state: s, + }) +} + +func (ccb *ccBalancerWrapper) handleResolvedAddrs(addrs []resolver.Address, err error) { + select { + case <-ccb.resolverUpdateCh: + default: + } + ccb.resolverUpdateCh <- &resolverUpdate{ + addrs: addrs, + err: err, + } +} + +func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) <= 0 { + return nil, fmt.Errorf("grpc: cannot create SubConn with empty address list") + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return nil, fmt.Errorf("grpc: ClientConn balancer wrapper was closed") + } + ac, err := ccb.cc.newAddrConn(addrs) + if err != nil { + return nil, err + } + acbw := &acBalancerWrapper{ac: ac} + acbw.ac.mu.Lock() + ac.acbw = acbw + acbw.ac.mu.Unlock() + ccb.subConns[acbw] = struct{}{} + return acbw, nil +} + +func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) { + acbw, ok := sc.(*acBalancerWrapper) + if !ok { + return + } + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + delete(ccb.subConns, acbw) + ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain) +} + +func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccb.mu.Lock() + defer ccb.mu.Unlock() + if ccb.subConns == nil { + return + } + ccb.cc.csMgr.updateState(s) + ccb.cc.blockingpicker.updatePicker(p) +} + +func (ccb *ccBalancerWrapper) ResolveNow(o resolver.ResolveNowOption) { + ccb.cc.resolveNow(o) +} + +func (ccb *ccBalancerWrapper) Target() string { + return ccb.cc.target +} + +// acBalancerWrapper is a wrapper on top of ac for balancers. +// It implements balancer.SubConn interface. 
+type acBalancerWrapper struct { + mu sync.Mutex + ac *addrConn +} + +func (acbw *acBalancerWrapper) UpdateAddresses(addrs []resolver.Address) { + acbw.mu.Lock() + defer acbw.mu.Unlock() + if len(addrs) <= 0 { + acbw.ac.tearDown(errConnDrain) + return + } + if !acbw.ac.tryUpdateAddrs(addrs) { + cc := acbw.ac.cc + acbw.ac.mu.Lock() + // Set old ac.acbw to nil so the Shutdown state update will be ignored + // by balancer. + // + // TODO(bar) the state transition could be wrong when tearDown() old ac + // and creating new ac, fix the transition. + acbw.ac.acbw = nil + acbw.ac.mu.Unlock() + acState := acbw.ac.getState() + acbw.ac.tearDown(errConnDrain) + + if acState == connectivity.Shutdown { + return + } + + ac, err := cc.newAddrConn(addrs) + if err != nil { + grpclog.Warningf("acBalancerWrapper: UpdateAddresses: failed to newAddrConn: %v", err) + return + } + acbw.ac = ac + ac.mu.Lock() + ac.acbw = acbw + ac.mu.Unlock() + if acState != connectivity.Idle { + ac.connect() + } + } +} + +func (acbw *acBalancerWrapper) Connect() { + acbw.mu.Lock() + defer acbw.mu.Unlock() + acbw.ac.connect() +} + +func (acbw *acBalancerWrapper) getAddrConn() *addrConn { + acbw.mu.Lock() + defer acbw.mu.Unlock() + return acbw.ac +} diff --git a/vendor/google.golang.org/grpc/balancer_v1_wrapper.go b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go new file mode 100644 index 00000000000..b7abc6b7457 --- /dev/null +++ b/vendor/google.golang.org/grpc/balancer_v1_wrapper.go @@ -0,0 +1,372 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "strings" + "sync" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" +) + +type balancerWrapperBuilder struct { + b Balancer // The v1 balancer. +} + +func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer { + targetAddr := cc.Target() + targetSplitted := strings.Split(targetAddr, ":///") + if len(targetSplitted) >= 2 { + targetAddr = targetSplitted[1] + } + + bwb.b.Start(targetAddr, BalancerConfig{ + DialCreds: opts.DialCreds, + Dialer: opts.Dialer, + }) + _, pickfirst := bwb.b.(*pickFirst) + bw := &balancerWrapper{ + balancer: bwb.b, + pickfirst: pickfirst, + cc: cc, + targetAddr: targetAddr, + startCh: make(chan struct{}), + conns: make(map[resolver.Address]balancer.SubConn), + connSt: make(map[balancer.SubConn]*scState), + csEvltr: &connectivityStateEvaluator{}, + state: connectivity.Idle, + } + cc.UpdateBalancerState(connectivity.Idle, bw) + go bw.lbWatcher() + return bw +} + +func (bwb *balancerWrapperBuilder) Name() string { + return "wrapper" +} + +type scState struct { + addr Address // The v1 address type. + s connectivity.State + down func(error) +} + +type balancerWrapper struct { + balancer Balancer // The v1 balancer. 
+ pickfirst bool + + cc balancer.ClientConn + targetAddr string // Target without the scheme. + + // To aggregate the connectivity state. + csEvltr *connectivityStateEvaluator + state connectivity.State + + mu sync.Mutex + conns map[resolver.Address]balancer.SubConn + connSt map[balancer.SubConn]*scState + // This channel is closed when handling the first resolver result. + // lbWatcher blocks until this is closed, to avoid race between + // - NewSubConn is created, cc wants to notify balancer of state changes; + // - Build hasn't return, cc doesn't have access to balancer. + startCh chan struct{} +} + +// lbWatcher watches the Notify channel of the balancer and manages +// connections accordingly. +func (bw *balancerWrapper) lbWatcher() { + <-bw.startCh + notifyCh := bw.balancer.Notify() + if notifyCh == nil { + // There's no resolver in the balancer. Connect directly. + a := resolver.Address{ + Addr: bw.targetAddr, + Type: resolver.Backend, + } + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: Address{Addr: bw.targetAddr}, + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + return + } + + for addrs := range notifyCh { + grpclog.Infof("balancerWrapper: got update addr from Notify: %v\n", addrs) + if bw.pickfirst { + var ( + oldA resolver.Address + oldSC balancer.SubConn + ) + bw.mu.Lock() + for oldA, oldSC = range bw.conns { + break + } + bw.mu.Unlock() + if len(addrs) <= 0 { + if oldSC != nil { + // Teardown old sc. + bw.mu.Lock() + delete(bw.conns, oldA) + delete(bw.connSt, oldSC) + bw.mu.Unlock() + bw.cc.RemoveSubConn(oldSC) + } + continue + } + + var newAddrs []resolver.Address + for _, a := range addrs { + newAddr := resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + } + newAddrs = append(newAddrs, newAddr) + } + if oldSC == nil { + // Create new sc. + sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err) + } else { + bw.mu.Lock() + // For pickfirst, there should be only one SubConn, so the + // address doesn't matter. All states updating (up and down) + // and picking should all happen on that only SubConn. + bw.conns[resolver.Address{}] = sc + bw.connSt[sc] = &scState{ + addr: addrs[0], // Use the first address. + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } else { + bw.mu.Lock() + bw.connSt[oldSC].addr = addrs[0] + bw.mu.Unlock() + oldSC.UpdateAddresses(newAddrs) + } + } else { + var ( + add []resolver.Address // Addresses need to setup connections. + del []balancer.SubConn // Connections need to tear down. + ) + resAddrs := make(map[resolver.Address]Address) + for _, a := range addrs { + resAddrs[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, // All addresses from balancer are all backends. + ServerName: "", + Metadata: a.Metadata, + }] = a + } + bw.mu.Lock() + for a := range resAddrs { + if _, ok := bw.conns[a]; !ok { + add = append(add, a) + } + } + for a, c := range bw.conns { + if _, ok := resAddrs[a]; !ok { + del = append(del, c) + delete(bw.conns, a) + // Keep the state of this sc in bw.connSt until its state becomes Shutdown. 
+ } + } + bw.mu.Unlock() + for _, a := range add { + sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("Error creating connection to %v. Err: %v", a, err) + } else { + bw.mu.Lock() + bw.conns[a] = sc + bw.connSt[sc] = &scState{ + addr: resAddrs[a], + s: connectivity.Idle, + } + bw.mu.Unlock() + sc.Connect() + } + } + for _, c := range del { + bw.cc.RemoveSubConn(c) + } + } + } +} + +func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + bw.mu.Lock() + defer bw.mu.Unlock() + scSt, ok := bw.connSt[sc] + if !ok { + return + } + if s == connectivity.Idle { + sc.Connect() + } + oldS := scSt.s + scSt.s = s + if oldS != connectivity.Ready && s == connectivity.Ready { + scSt.down = bw.balancer.Up(scSt.addr) + } else if oldS == connectivity.Ready && s != connectivity.Ready { + if scSt.down != nil { + scSt.down(errConnClosing) + } + } + sa := bw.csEvltr.recordTransition(oldS, s) + if bw.state != sa { + bw.state = sa + } + bw.cc.UpdateBalancerState(bw.state, bw) + if s == connectivity.Shutdown { + // Remove state for this sc. + delete(bw.connSt, sc) + } +} + +func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + // There should be a resolver inside the balancer. + // All updates here, if any, are ignored. +} + +func (bw *balancerWrapper) Close() { + bw.mu.Lock() + defer bw.mu.Unlock() + select { + case <-bw.startCh: + default: + close(bw.startCh) + } + bw.balancer.Close() +} + +// The picker is the balancerWrapper itself. +// Pick should never return ErrNoSubConnAvailable. +// It either blocks or returns error, consistent with v1 balancer Get(). +func (bw *balancerWrapper) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + failfast := true // Default failfast is true. + if ss, ok := rpcInfoFromContext(ctx); ok { + failfast = ss.failfast + } + a, p, err := bw.balancer.Get(ctx, BalancerGetOptions{BlockingWait: !failfast}) + if err != nil { + return nil, nil, err + } + var done func(balancer.DoneInfo) + if p != nil { + done = func(i balancer.DoneInfo) { p() } + } + var sc balancer.SubConn + bw.mu.Lock() + defer bw.mu.Unlock() + if bw.pickfirst { + // Get the first sc in conns. + for _, sc = range bw.conns { + break + } + } else { + var ok bool + sc, ok = bw.conns[resolver.Address{ + Addr: a.Addr, + Type: resolver.Backend, + ServerName: "", + Metadata: a.Metadata, + }] + if !ok && failfast { + return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available") + } + if s, ok := bw.connSt[sc]; failfast && (!ok || s.s != connectivity.Ready) { + // If the returned sc is not ready and RPC is failfast, + // return error, and this RPC will fail. + return nil, nil, status.Errorf(codes.Unavailable, "there is no connection available") + } + } + + return sc, done, nil +} + +// connectivityStateEvaluator gets updated by addrConns when their +// states transition, based on which it evaluates the state of +// ClientConn. +type connectivityStateEvaluator struct { + mu sync.Mutex + numReady uint64 // Number of addrConns in ready state. + numConnecting uint64 // Number of addrConns in connecting state. + numTransientFailure uint64 // Number of addrConns in transientFailure. 
+} + +// recordTransition records state change happening in every subConn and based on +// that it evaluates what aggregated state should be. +// It can only transition between Ready, Connecting and TransientFailure. Other states, +// Idle and Shutdown are transitioned into by ClientConn; in the beginning of the connection +// before any subConn is created ClientConn is in idle state. In the end when ClientConn +// closes it is in Shutdown state. +// TODO Note that in later releases, a ClientConn with no activity will be put into an Idle state. +func (cse *connectivityStateEvaluator) recordTransition(oldState, newState connectivity.State) connectivity.State { + cse.mu.Lock() + defer cse.mu.Unlock() + + // Update counters. + for idx, state := range []connectivity.State{oldState, newState} { + updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new. + switch state { + case connectivity.Ready: + cse.numReady += updateVal + case connectivity.Connecting: + cse.numConnecting += updateVal + case connectivity.TransientFailure: + cse.numTransientFailure += updateVal + } + } + + // Evaluate. + if cse.numReady > 0 { + return connectivity.Ready + } + if cse.numConnecting > 0 { + return connectivity.Connecting + } + return connectivity.TransientFailure +} diff --git a/vendor/google.golang.org/grpc/call.go b/vendor/google.golang.org/grpc/call.go new file mode 100644 index 00000000000..f73b7d5528f --- /dev/null +++ b/vendor/google.golang.org/grpc/call.go @@ -0,0 +1,93 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" +) + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// All errors returned by Invoke are compatible with the status package. +func (cc *ClientConn) Invoke(ctx context.Context, method string, args, reply interface{}, opts ...CallOption) error { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.unaryInt != nil { + return cc.dopts.unaryInt(ctx, method, args, reply, cc, invoke, opts...) + } + return invoke(ctx, method, args, reply, cc, opts...) +} + +func combine(o1 []CallOption, o2 []CallOption) []CallOption { + // we don't use append because o1 could have extra capacity whose + // elements would be overwritten, which could cause inadvertent + // sharing (and race connditions) between concurrent calls + if len(o1) == 0 { + return o2 + } else if len(o2) == 0 { + return o1 + } + ret := make([]CallOption, len(o1)+len(o2)) + copy(ret, o1) + copy(ret[len(o1):], o2) + return ret +} + +// Invoke sends the RPC request on the wire and returns after response is +// received. This is typically called by generated code. +// +// DEPRECATED: Use ClientConn.Invoke instead. 
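The comment inside combine above explains why the two CallOption slices are copied into a fresh slice rather than appended: append can reuse spare capacity in the first slice and let concurrent calls overwrite each other's options. A small standalone demonstration of that backing-array sharing, using strings in place of CallOptions:

// Sketch only: shows the sharing hazard that combine() avoids by copying.
package main

import "fmt"

func main() {
	base := make([]string, 2, 4) // spare capacity, like default call options
	base[0], base[1] = "defaultA", "defaultB"

	call1 := append(base, "perCall1") // reuses base's backing array
	call2 := append(base, "perCall2") // overwrites the same third element
	fmt.Println(call1[2], call2[2])   // both print "perCall2"

	// Copying into a fresh slice, as combine() does, keeps the two
	// per-call option sets independent.
	safe := make([]string, len(base)+1)
	copy(safe, base)
	safe[len(base)] = "perCall1"
	fmt.Println(safe[2]) // "perCall1"
}
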
+func Invoke(ctx context.Context, method string, args, reply interface{}, cc *ClientConn, opts ...CallOption) error { + return cc.Invoke(ctx, method, args, reply, opts...) +} + +var unaryStreamDesc = &StreamDesc{ServerStreams: false, ClientStreams: false} + +func invoke(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error { + // TODO: implement retries in clientStream and make this simply + // newClientStream, SendMsg, RecvMsg. + firstAttempt := true + for { + csInt, err := newClientStream(ctx, unaryStreamDesc, cc, method, opts...) + if err != nil { + return err + } + cs := csInt.(*clientStream) + if err := cs.SendMsg(req); err != nil { + if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { + // TODO: Add a field to header for grpc-transparent-retry-attempts + firstAttempt = false + continue + } + return err + } + if err := cs.RecvMsg(reply); err != nil { + if !cs.c.failFast && cs.attempt.s.Unprocessed() && firstAttempt { + // TODO: Add a field to header for grpc-transparent-retry-attempts + firstAttempt = false + continue + } + return err + } + return nil + } +} diff --git a/vendor/google.golang.org/grpc/channelz/funcs.go b/vendor/google.golang.org/grpc/channelz/funcs.go new file mode 100644 index 00000000000..586a0336b47 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/funcs.go @@ -0,0 +1,573 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package channelz defines APIs for enabling channelz service, entry +// registration/deletion, and accessing channelz data. It also defines channelz +// metric struct formats. +// +// All APIs in this package are experimental. +package channelz + +import ( + "sort" + "sync" + "sync/atomic" + + "google.golang.org/grpc/grpclog" +) + +var ( + db dbWrapper + idGen idGenerator + // EntryPerPage defines the number of channelz entries to be shown on a web page. + EntryPerPage = 50 + curState int32 +) + +// TurnOn turns on channelz data collection. +func TurnOn() { + if !IsOn() { + NewChannelzStorage() + atomic.StoreInt32(&curState, 1) + } +} + +// IsOn returns whether channelz data collection is on. +func IsOn() bool { + return atomic.CompareAndSwapInt32(&curState, 1, 1) +} + +// dbWarpper wraps around a reference to internal channelz data storage, and +// provide synchronized functionality to set and get the reference. +type dbWrapper struct { + mu sync.RWMutex + DB *channelMap +} + +func (d *dbWrapper) set(db *channelMap) { + d.mu.Lock() + d.DB = db + d.mu.Unlock() +} + +func (d *dbWrapper) get() *channelMap { + d.mu.RLock() + defer d.mu.RUnlock() + return d.DB +} + +// NewChannelzStorage initializes channelz data storage and id generator. +// +// Note: This function is exported for testing purpose only. User should not call +// it in most cases. 
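TurnOn and IsOn above gate channelz collection with a single int32 and sync/atomic instead of a mutex; IsOn uses CompareAndSwapInt32(&curState, 1, 1) as an atomic "is the flag 1?" check. A hedged sketch of the same on/off-flag idiom with hypothetical names:

// Sketch only: a lock-free on/off switch in the style of TurnOn/IsOn above.
package main

import (
	"fmt"
	"sync/atomic"
)

var collectionState int32

func turnOn() {
	if !isOn() {
		// one-time storage initialization would go here
		atomic.StoreInt32(&collectionState, 1)
	}
}

// isOn reads the flag without a lock; CompareAndSwap(1, 1) reports whether
// the value is currently 1 and leaves it unchanged.
func isOn() bool {
	return atomic.CompareAndSwapInt32(&collectionState, 1, 1)
}

func main() {
	fmt.Println(isOn()) // false
	turnOn()
	fmt.Println(isOn()) // true
}
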
+func NewChannelzStorage() { + db.set(&channelMap{ + topLevelChannels: make(map[int64]struct{}), + channels: make(map[int64]*channel), + listenSockets: make(map[int64]*listenSocket), + normalSockets: make(map[int64]*normalSocket), + servers: make(map[int64]*server), + subChannels: make(map[int64]*subChannel), + }) + idGen.reset() +} + +// GetTopChannels returns a slice of top channel's ChannelMetric, along with a +// boolean indicating whether there's more top channels to be queried for. +// +// The arg id specifies that only top channel with id at or above it will be included +// in the result. The returned slice is up to a length of EntryPerPage, and is +// sorted in ascending id order. +func GetTopChannels(id int64) ([]*ChannelMetric, bool) { + return db.get().GetTopChannels(id) +} + +// GetServers returns a slice of server's ServerMetric, along with a +// boolean indicating whether there's more servers to be queried for. +// +// The arg id specifies that only server with id at or above it will be included +// in the result. The returned slice is up to a length of EntryPerPage, and is +// sorted in ascending id order. +func GetServers(id int64) ([]*ServerMetric, bool) { + return db.get().GetServers(id) +} + +// GetServerSockets returns a slice of server's (identified by id) normal socket's +// SocketMetric, along with a boolean indicating whether there's more sockets to +// be queried for. +// +// The arg startID specifies that only sockets with id at or above it will be +// included in the result. The returned slice is up to a length of EntryPerPage, +// and is sorted in ascending id order. +func GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { + return db.get().GetServerSockets(id, startID) +} + +// GetChannel returns the ChannelMetric for the channel (identified by id). +func GetChannel(id int64) *ChannelMetric { + return db.get().GetChannel(id) +} + +// GetSubChannel returns the SubChannelMetric for the subchannel (identified by id). +func GetSubChannel(id int64) *SubChannelMetric { + return db.get().GetSubChannel(id) +} + +// GetSocket returns the SocketInternalMetric for the socket (identified by id). +func GetSocket(id int64) *SocketMetric { + return db.get().GetSocket(id) +} + +// RegisterChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). pid = 0 means no parent. It returns the unique channelz tracking id +// assigned to this channel. +func RegisterChannel(c Channel, pid int64, ref string) int64 { + id := idGen.genID() + cn := &channel{ + refName: ref, + c: c, + subChans: make(map[int64]string), + nestedChans: make(map[int64]string), + id: id, + pid: pid, + } + if pid == 0 { + db.get().addChannel(id, cn, true, pid, ref) + } else { + db.get().addChannel(id, cn, false, pid, ref) + } + return id +} + +// RegisterSubChannel registers the given channel c in channelz database with ref +// as its reference name, and add it to the child list of its parent (identified +// by pid). It returns the unique channelz tracking id assigned to this subchannel. +func RegisterSubChannel(c Channel, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a SubChannel's parent id cannot be 0") + return 0 + } + id := idGen.genID() + sc := &subChannel{ + refName: ref, + c: c, + sockets: make(map[int64]string), + id: id, + pid: pid, + } + db.get().addSubChannel(id, sc, pid, ref) + return id +} + +// RegisterServer registers the given server s in channelz database. 
It returns +// the unique channelz tracking id assigned to this server. +func RegisterServer(s Server, ref string) int64 { + id := idGen.genID() + svr := &server{ + refName: ref, + s: s, + sockets: make(map[int64]string), + listenSockets: make(map[int64]string), + id: id, + } + db.get().addServer(id, svr) + return id +} + +// RegisterListenSocket registers the given listen socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this listen socket. +func RegisterListenSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a ListenSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ls := &listenSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addListenSocket(id, ls, pid, ref) + return id +} + +// RegisterNormalSocket registers the given normal socket s in channelz database +// with ref as its reference name, and add it to the child list of its parent +// (identified by pid). It returns the unique channelz tracking id assigned to +// this normal socket. +func RegisterNormalSocket(s Socket, pid int64, ref string) int64 { + if pid == 0 { + grpclog.Error("a NormalSocket's parent id cannot be 0") + return 0 + } + id := idGen.genID() + ns := &normalSocket{refName: ref, s: s, id: id, pid: pid} + db.get().addNormalSocket(id, ns, pid, ref) + return id +} + +// RemoveEntry removes an entry with unique channelz trakcing id to be id from +// channelz database. +func RemoveEntry(id int64) { + db.get().removeEntry(id) +} + +// channelMap is the storage data structure for channelz. +// Methods of channelMap can be divided in two two categories with respect to locking. +// 1. Methods acquire the global lock. +// 2. Methods that can only be called when global lock is held. +// A second type of method need always to be called inside a first type of method. +type channelMap struct { + mu sync.RWMutex + topLevelChannels map[int64]struct{} + servers map[int64]*server + channels map[int64]*channel + subChannels map[int64]*subChannel + listenSockets map[int64]*listenSocket + normalSockets map[int64]*normalSocket +} + +func (c *channelMap) addServer(id int64, s *server) { + c.mu.Lock() + s.cm = c + c.servers[id] = s + c.mu.Unlock() +} + +func (c *channelMap) addChannel(id int64, cn *channel, isTopChannel bool, pid int64, ref string) { + c.mu.Lock() + cn.cm = c + c.channels[id] = cn + if isTopChannel { + c.topLevelChannels[id] = struct{}{} + } else { + c.findEntry(pid).addChild(id, cn) + } + c.mu.Unlock() +} + +func (c *channelMap) addSubChannel(id int64, sc *subChannel, pid int64, ref string) { + c.mu.Lock() + sc.cm = c + c.subChannels[id] = sc + c.findEntry(pid).addChild(id, sc) + c.mu.Unlock() +} + +func (c *channelMap) addListenSocket(id int64, ls *listenSocket, pid int64, ref string) { + c.mu.Lock() + ls.cm = c + c.listenSockets[id] = ls + c.findEntry(pid).addChild(id, ls) + c.mu.Unlock() +} + +func (c *channelMap) addNormalSocket(id int64, ns *normalSocket, pid int64, ref string) { + c.mu.Lock() + ns.cm = c + c.normalSockets[id] = ns + c.findEntry(pid).addChild(id, ns) + c.mu.Unlock() +} + +// removeEntry triggers the removal of an entry, which may not indeed delete the +// entry, if it has to wait on the deletion of its children, or may lead to a chain +// of entry deletion. For example, deleting the last socket of a gracefully shutting +// down server will lead to the server being also deleted. 
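Taken together, TurnOn, the Register* functions, and the Get* query functions above form the experimental channelz surface of this vendored revision. A hedged usage sketch that enables collection and pages through top channels in ascending-id order; the loop shape is illustrative, and per the implementation above the returned bool reports that the listing is complete:

// Sketch only: paging through channelz top channels with this vendored API.
package main

import (
	"fmt"

	"google.golang.org/grpc/channelz"
)

func main() {
	channelz.TurnOn() // normally enabled via grpc.RegisterChannelz()

	var startID int64
	for {
		// Each call returns at most channelz.EntryPerPage entries with
		// id >= startID, sorted by id.
		metrics, end := channelz.GetTopChannels(startID)
		for _, m := range metrics {
			fmt.Printf("channel %d (%s): state %v\n", m.ID, m.RefName, m.ChannelData.State)
		}
		if end || len(metrics) == 0 {
			break
		}
		startID = metrics[len(metrics)-1].ID + 1
	}
}
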
+func (c *channelMap) removeEntry(id int64) { + c.mu.Lock() + c.findEntry(id).triggerDelete() + c.mu.Unlock() +} + +// c.mu must be held by the caller. +func (c *channelMap) findEntry(id int64) entry { + var v entry + var ok bool + if v, ok = c.channels[id]; ok { + return v + } + if v, ok = c.subChannels[id]; ok { + return v + } + if v, ok = c.servers[id]; ok { + return v + } + if v, ok = c.listenSockets[id]; ok { + return v + } + if v, ok = c.normalSockets[id]; ok { + return v + } + return &dummyEntry{idNotFound: id} +} + +// c.mu must be held by the caller +// deleteEntry simply deletes an entry from the channelMap. Before calling this +// method, caller must check this entry is ready to be deleted, i.e removeEntry() +// has been called on it, and no children still exist. +// Conditionals are ordered by the expected frequency of deletion of each entity +// type, in order to optimize performance. +func (c *channelMap) deleteEntry(id int64) { + var ok bool + if _, ok = c.normalSockets[id]; ok { + delete(c.normalSockets, id) + return + } + if _, ok = c.subChannels[id]; ok { + delete(c.subChannels, id) + return + } + if _, ok = c.channels[id]; ok { + delete(c.channels, id) + delete(c.topLevelChannels, id) + return + } + if _, ok = c.listenSockets[id]; ok { + delete(c.listenSockets, id) + return + } + if _, ok = c.servers[id]; ok { + delete(c.servers, id) + return + } +} + +type int64Slice []int64 + +func (s int64Slice) Len() int { return len(s) } +func (s int64Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s int64Slice) Less(i, j int) bool { return s[i] < s[j] } + +func copyMap(m map[int64]string) map[int64]string { + n := make(map[int64]string) + for k, v := range m { + n[k] = v + } + return n +} + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func (c *channelMap) GetTopChannels(id int64) ([]*ChannelMetric, bool) { + c.mu.RLock() + l := len(c.topLevelChannels) + ids := make([]int64, 0, l) + cns := make([]*channel, 0, min(l, EntryPerPage)) + + for k := range c.topLevelChannels { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 + var end bool + var t []*ChannelMetric + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if cn, ok := c.channels[v]; ok { + cns = append(cns, cn) + t = append(t, &ChannelMetric{ + NestedChans: copyMap(cn.nestedChans), + SubChans: copyMap(cn.subChans), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, cn := range cns { + t[i].ChannelData = cn.c.ChannelzMetric() + t[i].ID = cn.id + t[i].RefName = cn.refName + } + return t, end +} + +func (c *channelMap) GetServers(id int64) ([]*ServerMetric, bool) { + c.mu.RLock() + l := len(c.servers) + ids := make([]int64, 0, l) + ss := make([]*server, 0, min(l, EntryPerPage)) + for k := range c.servers { + ids = append(ids, k) + } + sort.Sort(int64Slice(ids)) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 + var end bool + var s []*ServerMetric + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if svr, ok := c.servers[v]; ok { + ss = append(ss, svr) + s = append(s, &ServerMetric{ + ListenSockets: copyMap(svr.listenSockets), + }) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + + for i, svr := range ss { + s[i].ServerData = svr.s.ChannelzMetric() + s[i].ID = 
svr.id + s[i].RefName = svr.refName + } + return s, end +} + +func (c *channelMap) GetServerSockets(id int64, startID int64) ([]*SocketMetric, bool) { + var svr *server + var ok bool + c.mu.RLock() + if svr, ok = c.servers[id]; !ok { + // server with id doesn't exist. + c.mu.RUnlock() + return nil, true + } + svrskts := svr.sockets + l := len(svrskts) + ids := make([]int64, 0, l) + sks := make([]*normalSocket, 0, min(l, EntryPerPage)) + for k := range svrskts { + ids = append(ids, k) + } + sort.Sort((int64Slice(ids))) + idx := sort.Search(len(ids), func(i int) bool { return ids[i] >= id }) + count := 0 + var end bool + for i, v := range ids[idx:] { + if count == EntryPerPage { + break + } + if ns, ok := c.normalSockets[v]; ok { + sks = append(sks, ns) + count++ + } + if i == len(ids[idx:])-1 { + end = true + break + } + } + c.mu.RUnlock() + if count == 0 { + end = true + } + var s []*SocketMetric + for _, ns := range sks { + sm := &SocketMetric{} + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + s = append(s, sm) + } + return s, end +} + +func (c *channelMap) GetChannel(id int64) *ChannelMetric { + cm := &ChannelMetric{} + var cn *channel + var ok bool + c.mu.RLock() + if cn, ok = c.channels[id]; !ok { + // channel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.NestedChans = copyMap(cn.nestedChans) + cm.SubChans = copyMap(cn.subChans) + c.mu.RUnlock() + cm.ChannelData = cn.c.ChannelzMetric() + cm.ID = cn.id + cm.RefName = cn.refName + return cm +} + +func (c *channelMap) GetSubChannel(id int64) *SubChannelMetric { + cm := &SubChannelMetric{} + var sc *subChannel + var ok bool + c.mu.RLock() + if sc, ok = c.subChannels[id]; !ok { + // subchannel with id doesn't exist. + c.mu.RUnlock() + return nil + } + cm.Sockets = copyMap(sc.sockets) + c.mu.RUnlock() + cm.ChannelData = sc.c.ChannelzMetric() + cm.ID = sc.id + cm.RefName = sc.refName + return cm +} + +func (c *channelMap) GetSocket(id int64) *SocketMetric { + sm := &SocketMetric{} + c.mu.RLock() + if ls, ok := c.listenSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ls.s.ChannelzMetric() + sm.ID = ls.id + sm.RefName = ls.refName + return sm + } + if ns, ok := c.normalSockets[id]; ok { + c.mu.RUnlock() + sm.SocketData = ns.s.ChannelzMetric() + sm.ID = ns.id + sm.RefName = ns.refName + return sm + } + c.mu.RUnlock() + return nil +} + +type idGenerator struct { + id int64 +} + +func (i *idGenerator) reset() { + atomic.StoreInt64(&i.id, 0) +} + +func (i *idGenerator) genID() int64 { + return atomic.AddInt64(&i.id, 1) +} diff --git a/vendor/google.golang.org/grpc/channelz/types.go b/vendor/google.golang.org/grpc/channelz/types.go new file mode 100644 index 00000000000..153d75340e4 --- /dev/null +++ b/vendor/google.golang.org/grpc/channelz/types.go @@ -0,0 +1,418 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package channelz + +import ( + "net" + "time" + + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" +) + +// entry represents a node in the channelz database. +type entry interface { + // addChild adds a child e, whose channelz id is id to child list + addChild(id int64, e entry) + // deleteChild deletes a child with channelz id to be id from child list + deleteChild(id int64) + // triggerDelete tries to delete self from channelz database. However, if child + // list is not empty, then deletion from the database is on hold until the last + // child is deleted from database. + triggerDelete() + // deleteSelfIfReady check whether triggerDelete() has been called before, and whether child + // list is now empty. If both conditions are met, then delete self from database. + deleteSelfIfReady() +} + +// dummyEntry is a fake entry to handle entry not found case. +type dummyEntry struct { + idNotFound int64 +} + +func (d *dummyEntry) addChild(id int64, e entry) { + // Note: It is possible for a normal program to reach here under race condition. + // For example, there could be a race between ClientConn.Close() info being propagated + // to addrConn and http2Client. ClientConn.Close() cancel the context and result + // in http2Client to error. The error info is then caught by transport monitor + // and before addrConn.tearDown() is called in side ClientConn.Close(). Therefore, + // the addrConn will create a new transport. And when registering the new transport in + // channelz, its parent addrConn could have already been torn down and deleted + // from channelz tracking, and thus reach the code here. + grpclog.Infof("attempt to add child of type %T with id %d to a parent (id=%d) that doesn't currently exist", e, id, d.idNotFound) +} + +func (d *dummyEntry) deleteChild(id int64) { + // It is possible for a normal program to reach here under race condition. + // Refer to the example described in addChild(). + grpclog.Infof("attempt to delete child with id %d from a parent (id=%d) that doesn't currently exist", id, d.idNotFound) +} + +func (d *dummyEntry) triggerDelete() { + grpclog.Warningf("attempt to delete an entry (id=%d) that doesn't currently exist", d.idNotFound) +} + +func (*dummyEntry) deleteSelfIfReady() { + // code should not reach here. deleteSelfIfReady is always called on an existing entry. +} + +// ChannelMetric defines the info channelz provides for a specific Channel, which +// includes ChannelInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ChannelMetric struct { + // ID is the channelz id of this channel. + ID int64 + // RefName is the human readable reference string of this channel. + RefName string + // ChannelData contains channel internal metric reported by the channel through + // ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this channel in the format of + // a map from nested channel channelz id to corresponding reference string. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this channel in the format of a + // map from subchannel channelz id to corresponding reference string. + SubChans map[int64]string + // Sockets tracks the socket type children of this channel in the format of a map + // from socket channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow channel having sockets directly, + // therefore, this is field is unused. 
+ Sockets map[int64]string +} + +// SubChannelMetric defines the info channelz provides for a specific SubChannel, +// which includes ChannelInternalMetric and channelz-specific data, such as +// channelz id, child list, etc. +type SubChannelMetric struct { + // ID is the channelz id of this subchannel. + ID int64 + // RefName is the human readable reference string of this subchannel. + RefName string + // ChannelData contains subchannel internal metric reported by the subchannel + // through ChannelzMetric(). + ChannelData *ChannelInternalMetric + // NestedChans tracks the nested channel type children of this subchannel in the format of + // a map from nested channel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have nested channels + // as children, therefore, this field is unused. + NestedChans map[int64]string + // SubChans tracks the subchannel type children of this subchannel in the format of a + // map from subchannel channelz id to corresponding reference string. + // Note current grpc implementation doesn't allow subchannel to have subchannels + // as children, therefore, this field is unused. + SubChans map[int64]string + // Sockets tracks the socket type children of this subchannel in the format of a map + // from socket channelz id to corresponding reference string. + Sockets map[int64]string +} + +// ChannelInternalMetric defines the struct that the implementor of Channel interface +// should return from ChannelzMetric(). +type ChannelInternalMetric struct { + // current connectivity state of the channel. + State connectivity.State + // The target this channel originally tried to connect to. May be absent + Target string + // The number of calls started on the channel. + CallsStarted int64 + // The number of calls that have completed with an OK status. + CallsSucceeded int64 + // The number of calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the channel. + LastCallStartedTimestamp time.Time + //TODO: trace +} + +// Channel is the interface that should be satisfied in order to be tracked by +// channelz as Channel or SubChannel. 
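The entry interface above encodes a deferred-deletion contract: triggerDelete marks a node for removal, but the node only leaves the database once deleteSelfIfReady sees both the close request and an empty child list, which is why deleting a last child can cascade upward. A standalone sketch of that contract with illustrative types:

// Sketch only: the triggerDelete / deleteSelfIfReady contract in miniature.
package main

import "fmt"

type node struct {
	id          int64
	closeCalled bool
	children    map[int64]*node
	registry    map[int64]*node
	parent      *node
}

func (n *node) triggerDelete() {
	n.closeCalled = true
	n.deleteSelfIfReady()
}

func (n *node) deleteChild(id int64) {
	delete(n.children, id)
	n.deleteSelfIfReady()
}

func (n *node) deleteSelfIfReady() {
	if !n.closeCalled || len(n.children) != 0 {
		return // still in use: not closed yet, or children remain
	}
	delete(n.registry, n.id)
	if n.parent != nil {
		n.parent.deleteChild(n.id) // may cascade upward, as removeEntry notes
	}
}

func main() {
	reg := map[int64]*node{}
	parent := &node{id: 1, children: map[int64]*node{}, registry: reg}
	child := &node{id: 2, children: map[int64]*node{}, registry: reg, parent: parent}
	reg[1], reg[2] = parent, child
	parent.children[2] = child

	parent.triggerDelete() // parent waits: its child is still registered
	fmt.Println(len(reg))  // 2
	child.triggerDelete()  // child removes itself, then unblocks the parent
	fmt.Println(len(reg))  // 0
}
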
+type Channel interface { + ChannelzMetric() *ChannelInternalMetric +} + +type channel struct { + refName string + c Channel + closeCalled bool + nestedChans map[int64]string + subChans map[int64]string + id int64 + pid int64 + cm *channelMap +} + +func (c *channel) addChild(id int64, e entry) { + switch v := e.(type) { + case *subChannel: + c.subChans[id] = v.refName + case *channel: + c.nestedChans[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a channel", id, e) + } +} + +func (c *channel) deleteChild(id int64) { + delete(c.subChans, id) + delete(c.nestedChans, id) + c.deleteSelfIfReady() +} + +func (c *channel) triggerDelete() { + c.closeCalled = true + c.deleteSelfIfReady() +} + +func (c *channel) deleteSelfIfReady() { + if !c.closeCalled || len(c.subChans)+len(c.nestedChans) != 0 { + return + } + c.cm.deleteEntry(c.id) + // not top channel + if c.pid != 0 { + c.cm.findEntry(c.pid).deleteChild(c.id) + } +} + +type subChannel struct { + refName string + c Channel + closeCalled bool + sockets map[int64]string + id int64 + pid int64 + cm *channelMap +} + +func (sc *subChannel) addChild(id int64, e entry) { + if v, ok := e.(*normalSocket); ok { + sc.sockets[id] = v.refName + } else { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a subChannel", id, e) + } +} + +func (sc *subChannel) deleteChild(id int64) { + delete(sc.sockets, id) + sc.deleteSelfIfReady() +} + +func (sc *subChannel) triggerDelete() { + sc.closeCalled = true + sc.deleteSelfIfReady() +} + +func (sc *subChannel) deleteSelfIfReady() { + if !sc.closeCalled || len(sc.sockets) != 0 { + return + } + sc.cm.deleteEntry(sc.id) + sc.cm.findEntry(sc.pid).deleteChild(sc.id) +} + +// SocketMetric defines the info channelz provides for a specific Socket, which +// includes SocketInternalMetric and channelz-specific data, such as channelz id, etc. +type SocketMetric struct { + // ID is the channelz id of this socket. + ID int64 + // RefName is the human readable reference string of this socket. + RefName string + // SocketData contains socket internal metric reported by the socket through + // ChannelzMetric(). + SocketData *SocketInternalMetric +} + +// SocketInternalMetric defines the struct that the implementor of Socket interface +// should return from ChannelzMetric(). +type SocketInternalMetric struct { + // The number of streams that have been started. + StreamsStarted int64 + // The number of streams that have ended successfully: + // On client side, receiving frame with eos bit set. + // On server side, sending frame with eos bit set. + StreamsSucceeded int64 + // The number of streams that have ended unsuccessfully: + // On client side, termination without receiving frame with eos bit set. + // On server side, termination without sending frame with eos bit set. + StreamsFailed int64 + // The number of messages successfully sent on this socket. + MessagesSent int64 + MessagesReceived int64 + // The number of keep alives sent. This is typically implemented with HTTP/2 + // ping messages. + KeepAlivesSent int64 + // The last time a stream was created by this endpoint. Usually unset for + // servers. + LastLocalStreamCreatedTimestamp time.Time + // The last time a stream was created by the remote endpoint. Usually unset + // for clients. + LastRemoteStreamCreatedTimestamp time.Time + // The last time a message was sent by this endpoint. + LastMessageSentTimestamp time.Time + // The last time a message was received by this endpoint. 
+ LastMessageReceivedTimestamp time.Time + // The amount of window, granted to the local endpoint by the remote endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + LocalFlowControlWindow int64 + // The amount of window, granted to the remote endpoint by the local endpoint. + // This may be slightly out of date due to network latency. This does NOT + // include stream level or TCP level flow control info. + RemoteFlowControlWindow int64 + // The locally bound address. + LocalAddr net.Addr + // The remote bound address. May be absent. + RemoteAddr net.Addr + // Optional, represents the name of the remote endpoint, if different than + // the original target name. + RemoteName string + //TODO: socket options + //TODO: Security +} + +// Socket is the interface that should be satisfied in order to be tracked by +// channelz as Socket. +type Socket interface { + ChannelzMetric() *SocketInternalMetric +} + +type listenSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ls *listenSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a listen socket", id, e) +} + +func (ls *listenSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a listen socket", id) +} + +func (ls *listenSocket) triggerDelete() { + ls.cm.deleteEntry(ls.id) + ls.cm.findEntry(ls.pid).deleteChild(ls.id) +} + +func (ls *listenSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a listen socket") +} + +type normalSocket struct { + refName string + s Socket + id int64 + pid int64 + cm *channelMap +} + +func (ns *normalSocket) addChild(id int64, e entry) { + grpclog.Errorf("cannot add a child (id = %d) of type %T to a normal socket", id, e) +} + +func (ns *normalSocket) deleteChild(id int64) { + grpclog.Errorf("cannot delete a child (id = %d) from a normal socket", id) +} + +func (ns *normalSocket) triggerDelete() { + ns.cm.deleteEntry(ns.id) + ns.cm.findEntry(ns.pid).deleteChild(ns.id) +} + +func (ns *normalSocket) deleteSelfIfReady() { + grpclog.Errorf("cannot call deleteSelfIfReady on a normal socket") +} + +// ServerMetric defines the info channelz provides for a specific Server, which +// includes ServerInternalMetric and channelz-specific data, such as channelz id, +// child list, etc. +type ServerMetric struct { + // ID is the channelz id of this server. + ID int64 + // RefName is the human readable reference string of this server. + RefName string + // ServerData contains server internal metric reported by the server through + // ChannelzMetric(). + ServerData *ServerInternalMetric + // ListenSockets tracks the listener socket type children of this server in the + // format of a map from socket channelz id to corresponding reference string. + ListenSockets map[int64]string +} + +// ServerInternalMetric defines the struct that the implementor of Server interface +// should return from ChannelzMetric(). +type ServerInternalMetric struct { + // The number of incoming calls started on the server. + CallsStarted int64 + // The number of incoming calls that have completed with an OK status. + CallsSucceeded int64 + // The number of incoming calls that have a completed with a non-OK status. + CallsFailed int64 + // The last time a call was started on the server. 
+ LastCallStartedTimestamp time.Time + //TODO: trace +} + +// Server is the interface to be satisfied in order to be tracked by channelz as +// Server. +type Server interface { + ChannelzMetric() *ServerInternalMetric +} + +type server struct { + refName string + s Server + closeCalled bool + sockets map[int64]string + listenSockets map[int64]string + id int64 + cm *channelMap +} + +func (s *server) addChild(id int64, e entry) { + switch v := e.(type) { + case *normalSocket: + s.sockets[id] = v.refName + case *listenSocket: + s.listenSockets[id] = v.refName + default: + grpclog.Errorf("cannot add a child (id = %d) of type %T to a server", id, e) + } +} + +func (s *server) deleteChild(id int64) { + delete(s.sockets, id) + delete(s.listenSockets, id) + s.deleteSelfIfReady() +} + +func (s *server) triggerDelete() { + s.closeCalled = true + s.deleteSelfIfReady() +} + +func (s *server) deleteSelfIfReady() { + if !s.closeCalled || len(s.sockets)+len(s.listenSockets) != 0 { + return + } + s.cm.deleteEntry(s.id) +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go new file mode 100644 index 00000000000..e8d95b43b74 --- /dev/null +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -0,0 +1,1591 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "fmt" + "math" + "net" + "reflect" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + _ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin. + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/resolver" + _ "google.golang.org/grpc/resolver/dns" // To register dns resolver. + _ "google.golang.org/grpc/resolver/passthrough" // To register passthrough resolver. + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +const ( + // minimum time to give a connection to complete + minConnectTimeout = 20 * time.Second +) + +var ( + // ErrClientConnClosing indicates that the operation is illegal because + // the ClientConn is closing. + // + // Deprecated: this error should not be relied upon by users; use the status + // code of Canceled instead. + ErrClientConnClosing = status.Error(codes.Canceled, "grpc: the client connection is closing") + // errConnDrain indicates that the connection starts to be drained and does not accept any new RPCs. + errConnDrain = errors.New("grpc: the connection is drained") + // errConnClosing indicates that the connection is closing. + errConnClosing = errors.New("grpc: the connection is closing") + // errConnUnavailable indicates that the connection is unavailable. 
+ errConnUnavailable = errors.New("grpc: the connection is unavailable") + // errBalancerClosed indicates that the balancer is closed. + errBalancerClosed = errors.New("grpc: balancer is closed") + // We use an accessor so that minConnectTimeout can be + // atomically read and updated while testing. + getMinConnectTimeout = func() time.Duration { + return minConnectTimeout + } +) + +// The following errors are returned from Dial and DialContext +var ( + // errNoTransportSecurity indicates that there is no transport security + // being set for ClientConn. Users should either set one or explicitly + // call WithInsecure DialOption to disable security. + errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)") + // errTransportCredentialsMissing indicates that users want to transmit security + // information (e.g., oauth2 token) which requires secure connection on an insecure + // connection. + errTransportCredentialsMissing = errors.New("grpc: the credentials require transport level security (use grpc.WithTransportCredentials() to set)") + // errCredentialsConflict indicates that grpc.WithTransportCredentials() + // and grpc.WithInsecure() are both called for a connection. + errCredentialsConflict = errors.New("grpc: transport credentials are set for an insecure connection (grpc.WithTransportCredentials() and grpc.WithInsecure() are both called)") + // errNetworkIO indicates that the connection is down due to some network I/O error. + errNetworkIO = errors.New("grpc: failed with network I/O error") +) + +// dialOptions configure a Dial call. dialOptions are set by the DialOption +// values passed to Dial. +type dialOptions struct { + unaryInt UnaryClientInterceptor + streamInt StreamClientInterceptor + cp Compressor + dc Decompressor + bs backoffStrategy + block bool + insecure bool + timeout time.Duration + scChan <-chan ServiceConfig + copts transport.ConnectOptions + callOptions []CallOption + // This is used by v1 balancer dial option WithBalancer to support v1 + // balancer, and also by WithBalancerName dial option. + balancerBuilder balancer.Builder + // This is to support grpclb. + resolverBuilder resolver.Builder + waitForHandshake bool + channelzParentID int64 + disableServiceConfig bool +} + +const ( + defaultClientMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultClientMaxSendMessageSize = math.MaxInt32 +) + +// RegisterChannelz turns on channelz service. +// This is an EXPERIMENTAL API. +func RegisterChannelz() { + channelz.TurnOn() +} + +// DialOption configures how we set up the connection. +type DialOption func(*dialOptions) + +// WithWaitForHandshake blocks until the initial settings frame is received from the +// server before assigning RPCs to the connection. +// Experimental API. +func WithWaitForHandshake() DialOption { + return func(o *dialOptions) { + o.waitForHandshake = true + } +} + +// WithWriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WithWriteBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.WriteBufferSize = s + } +} + +// WithReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for each read syscall. +func WithReadBufferSize(s int) DialOption { + return func(o *dialOptions) { + o.copts.ReadBufferSize = s + } +} + +// WithInitialWindowSize returns a DialOption which sets the value for initial window size on a stream. 
+// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialWindowSize = s + } +} + +// WithInitialConnWindowSize returns a DialOption which sets the value for initial window size on a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func WithInitialConnWindowSize(s int32) DialOption { + return func(o *dialOptions) { + o.copts.InitialConnWindowSize = s + } +} + +// WithMaxMsgSize returns a DialOption which sets the maximum message size the client can receive. +// +// Deprecated: use WithDefaultCallOptions(MaxCallRecvMsgSize(s)) instead. +func WithMaxMsgSize(s int) DialOption { + return WithDefaultCallOptions(MaxCallRecvMsgSize(s)) +} + +// WithDefaultCallOptions returns a DialOption which sets the default CallOptions for calls over the connection. +func WithDefaultCallOptions(cos ...CallOption) DialOption { + return func(o *dialOptions) { + o.callOptions = append(o.callOptions, cos...) + } +} + +// WithCodec returns a DialOption which sets a codec for message marshaling and unmarshaling. +// +// Deprecated: use WithDefaultCallOptions(CallCustomCodec(c)) instead. +func WithCodec(c Codec) DialOption { + return WithDefaultCallOptions(CallCustomCodec(c)) +} + +// WithCompressor returns a DialOption which sets a Compressor to use for +// message compression. It has lower priority than the compressor set by +// the UseCompressor CallOption. +// +// Deprecated: use UseCompressor instead. +func WithCompressor(cp Compressor) DialOption { + return func(o *dialOptions) { + o.cp = cp + } +} + +// WithDecompressor returns a DialOption which sets a Decompressor to use for +// incoming message decompression. If incoming response messages are encoded +// using the decompressor's Type(), it will be used. Otherwise, the message +// encoding will be used to look up the compressor registered via +// encoding.RegisterCompressor, which will then be used to decompress the +// message. If no compressor is registered for the encoding, an Unimplemented +// status error will be returned. +// +// Deprecated: use encoding.RegisterCompressor instead. +func WithDecompressor(dc Decompressor) DialOption { + return func(o *dialOptions) { + o.dc = dc + } +} + +// WithBalancer returns a DialOption which sets a load balancer with the v1 API. +// Name resolver will be ignored if this DialOption is specified. +// +// Deprecated: use the new balancer APIs in balancer package and WithBalancerName. +func WithBalancer(b Balancer) DialOption { + return func(o *dialOptions) { + o.balancerBuilder = &balancerWrapperBuilder{ + b: b, + } + } +} + +// WithBalancerName sets the balancer that the ClientConn will be initialized +// with. Balancer registered with balancerName will be used. This function +// panics if no balancer was registered by balancerName. +// +// The balancer cannot be overridden by balancer option specified by service +// config. +// +// This is an EXPERIMENTAL API. +func WithBalancerName(balancerName string) DialOption { + builder := balancer.Get(balancerName) + if builder == nil { + panic(fmt.Sprintf("grpc.WithBalancerName: no balancer is registered for name %v", balancerName)) + } + return func(o *dialOptions) { + o.balancerBuilder = builder + } +} + +// withResolverBuilder is only for grpclb. 
+func withResolverBuilder(b resolver.Builder) DialOption { + return func(o *dialOptions) { + o.resolverBuilder = b + } +} + +// WithServiceConfig returns a DialOption which has a channel to read the service configuration. +// +// Deprecated: service config should be received through name resolver, as specified here. +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +func WithServiceConfig(c <-chan ServiceConfig) DialOption { + return func(o *dialOptions) { + o.scChan = c + } +} + +// WithBackoffMaxDelay configures the dialer to use the provided maximum delay +// when backing off after failed connection attempts. +func WithBackoffMaxDelay(md time.Duration) DialOption { + return WithBackoffConfig(BackoffConfig{MaxDelay: md}) +} + +// WithBackoffConfig configures the dialer to use the provided backoff +// parameters after connection failures. +// +// Use WithBackoffMaxDelay until more parameters on BackoffConfig are opened up +// for use. +func WithBackoffConfig(b BackoffConfig) DialOption { + // Set defaults to ensure that provided BackoffConfig is valid and + // unexported fields get default values. + setDefaults(&b) + return withBackoff(b) +} + +// withBackoff sets the backoff strategy used for connectRetryNum after a +// failed connection attempt. +// +// This can be exported if arbitrary backoff strategies are allowed by gRPC. +func withBackoff(bs backoffStrategy) DialOption { + return func(o *dialOptions) { + o.bs = bs + } +} + +// WithBlock returns a DialOption which makes caller of Dial blocks until the underlying +// connection is up. Without this, Dial returns immediately and connecting the server +// happens in background. +func WithBlock() DialOption { + return func(o *dialOptions) { + o.block = true + } +} + +// WithInsecure returns a DialOption which disables transport security for this ClientConn. +// Note that transport security is required unless WithInsecure is set. +func WithInsecure() DialOption { + return func(o *dialOptions) { + o.insecure = true + } +} + +// WithTransportCredentials returns a DialOption which configures a +// connection level security credentials (e.g., TLS/SSL). +func WithTransportCredentials(creds credentials.TransportCredentials) DialOption { + return func(o *dialOptions) { + o.copts.TransportCredentials = creds + } +} + +// WithPerRPCCredentials returns a DialOption which sets +// credentials and places auth state on each outbound RPC. +func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption { + return func(o *dialOptions) { + o.copts.PerRPCCredentials = append(o.copts.PerRPCCredentials, creds) + } +} + +// WithTimeout returns a DialOption that configures a timeout for dialing a ClientConn +// initially. This is valid if and only if WithBlock() is present. +// +// Deprecated: use DialContext and context.WithTimeout instead. +func WithTimeout(d time.Duration) DialOption { + return func(o *dialOptions) { + o.timeout = d + } +} + +func withContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption { + return func(o *dialOptions) { + o.copts.Dialer = f + } +} + +// WithDialer returns a DialOption that specifies a function to use for dialing network addresses. +// If FailOnNonTempDialError() is set to true, and an error is returned by f, gRPC checks the error's +// Temporary() method to decide if it should try to reconnect to the network address. 
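The WithXxx constructors in this file are plain functional options: each returns a closure that mutates the private dialOptions before the connection is built. A hedged usage sketch showing how they compose at a dial site; the target address is hypothetical, and the options used are the ones defined in this vendored file:

// Sketch only: composing DialOptions at a call site.
package main

import (
	"context"
	"log"
	"time"

	"google.golang.org/grpc"
)

func main() {
	// Bound the blocking dial with a context deadline (preferred over the
	// deprecated WithTimeout option).
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	conn, err := grpc.DialContext(ctx, "localhost:50051",
		grpc.WithInsecure(),          // no transport security in this sketch
		grpc.WithBlock(),             // wait until the connection is up
		grpc.WithUserAgent("demo/1"), // appended to the default gRPC user agent
	)
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()
}
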
+func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption { + return withContextDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + if deadline, ok := ctx.Deadline(); ok { + return f(addr, deadline.Sub(time.Now())) + } + return f(addr, 0) + }) +} + +// WithStatsHandler returns a DialOption that specifies the stats handler +// for all the RPCs and underlying network connections in this ClientConn. +func WithStatsHandler(h stats.Handler) DialOption { + return func(o *dialOptions) { + o.copts.StatsHandler = h + } +} + +// FailOnNonTempDialError returns a DialOption that specifies if gRPC fails on non-temporary dial errors. +// If f is true, and dialer returns a non-temporary error, gRPC will fail the connection to the network +// address and won't try to reconnect. +// The default value of FailOnNonTempDialError is false. +// This is an EXPERIMENTAL API. +func FailOnNonTempDialError(f bool) DialOption { + return func(o *dialOptions) { + o.copts.FailOnNonTempDialError = f + } +} + +// WithUserAgent returns a DialOption that specifies a user agent string for all the RPCs. +func WithUserAgent(s string) DialOption { + return func(o *dialOptions) { + o.copts.UserAgent = s + } +} + +// WithKeepaliveParams returns a DialOption that specifies keepalive parameters for the client transport. +func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption { + return func(o *dialOptions) { + o.copts.KeepaliveParams = kp + } +} + +// WithUnaryInterceptor returns a DialOption that specifies the interceptor for unary RPCs. +func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption { + return func(o *dialOptions) { + o.unaryInt = f + } +} + +// WithStreamInterceptor returns a DialOption that specifies the interceptor for streaming RPCs. +func WithStreamInterceptor(f StreamClientInterceptor) DialOption { + return func(o *dialOptions) { + o.streamInt = f + } +} + +// WithAuthority returns a DialOption that specifies the value to be used as +// the :authority pseudo-header. This value only works with WithInsecure and +// has no effect if TransportCredentials are present. +func WithAuthority(a string) DialOption { + return func(o *dialOptions) { + o.copts.Authority = a + } +} + +// WithChannelzParentID returns a DialOption that specifies the channelz ID of current ClientConn's +// parent. This function is used in nested channel creation (e.g. grpclb dial). +func WithChannelzParentID(id int64) DialOption { + return func(o *dialOptions) { + o.channelzParentID = id + } +} + +// WithDisableServiceConfig returns a DialOption that causes grpc to ignore any +// service config provided by the resolver and provides a hint to the resolver +// to not fetch service configs. +func WithDisableServiceConfig() DialOption { + return func(o *dialOptions) { + o.disableServiceConfig = true + } +} + +// Dial creates a client connection to the given target. +func Dial(target string, opts ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), target, opts...) +} + +// DialContext creates a client connection to the given target. By default, it's +// a non-blocking dial (the function won't wait for connections to be +// established, and connecting happens in the background). To make it a blocking +// dial, use WithBlock() dial option. +// +// In the non-blocking case, the ctx does not act against the connection. It +// only controls the setup steps. +// +// In the blocking case, ctx can be used to cancel or expire the pending +// connection. 
Once this function returns, the cancellation and expiration of +// ctx will be noop. Users should call ClientConn.Close to terminate all the +// pending operations after this function returns. +// +// The target name syntax is defined in +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +// e.g. to use dns resolver, a "dns:///" prefix should be applied to the target. +func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { + cc := &ClientConn{ + target: target, + csMgr: &connectivityStateManager{}, + conns: make(map[*addrConn]struct{}), + + blockingpicker: newPickerWrapper(), + } + cc.ctx, cc.cancel = context.WithCancel(context.Background()) + + for _, opt := range opts { + opt(&cc.dopts) + } + + if channelz.IsOn() { + if cc.dopts.channelzParentID != 0 { + cc.channelzID = channelz.RegisterChannel(cc, cc.dopts.channelzParentID, target) + } else { + cc.channelzID = channelz.RegisterChannel(cc, 0, target) + } + } + + if !cc.dopts.insecure { + if cc.dopts.copts.TransportCredentials == nil { + return nil, errNoTransportSecurity + } + } else { + if cc.dopts.copts.TransportCredentials != nil { + return nil, errCredentialsConflict + } + for _, cd := range cc.dopts.copts.PerRPCCredentials { + if cd.RequireTransportSecurity() { + return nil, errTransportCredentialsMissing + } + } + } + + cc.mkp = cc.dopts.copts.KeepaliveParams + + if cc.dopts.copts.Dialer == nil { + cc.dopts.copts.Dialer = newProxyDialer( + func(ctx context.Context, addr string) (net.Conn, error) { + network, addr := parseDialTarget(addr) + return dialContext(ctx, network, addr) + }, + ) + } + + if cc.dopts.copts.UserAgent != "" { + cc.dopts.copts.UserAgent += " " + grpcUA + } else { + cc.dopts.copts.UserAgent = grpcUA + } + + if cc.dopts.timeout > 0 { + var cancel context.CancelFunc + ctx, cancel = context.WithTimeout(ctx, cc.dopts.timeout) + defer cancel() + } + + defer func() { + select { + case <-ctx.Done(): + conn, err = nil, ctx.Err() + default: + } + + if err != nil { + cc.Close() + } + }() + + scSet := false + if cc.dopts.scChan != nil { + // Try to get an initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + scSet = true + } + default: + } + } + if cc.dopts.bs == nil { + cc.dopts.bs = DefaultBackoffConfig + } + if cc.dopts.resolverBuilder == nil { + // Only try to parse target when resolver builder is not already set. + cc.parsedTarget = parseTarget(cc.target) + grpclog.Infof("parsed scheme: %q", cc.parsedTarget.Scheme) + cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + if cc.dopts.resolverBuilder == nil { + // If resolver builder is still nil, the parse target's scheme is + // not registered. Fallback to default resolver and set Endpoint to + // the original unparsed target. + grpclog.Infof("scheme %q not registered, fallback to default scheme", cc.parsedTarget.Scheme) + cc.parsedTarget = resolver.Target{ + Scheme: resolver.GetDefaultScheme(), + Endpoint: target, + } + cc.dopts.resolverBuilder = resolver.Get(cc.parsedTarget.Scheme) + } + } else { + cc.parsedTarget = resolver.Target{Endpoint: target} + } + creds := cc.dopts.copts.TransportCredentials + if creds != nil && creds.Info().ServerName != "" { + cc.authority = creds.Info().ServerName + } else if cc.dopts.insecure && cc.dopts.copts.Authority != "" { + cc.authority = cc.dopts.copts.Authority + } else { + // Use endpoint from "scheme://authority/endpoint" as the default + // authority for ClientConn. 
+ cc.authority = cc.parsedTarget.Endpoint + } + + if cc.dopts.scChan != nil && !scSet { + // Blocking wait for the initial service config. + select { + case sc, ok := <-cc.dopts.scChan: + if ok { + cc.sc = sc + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + if cc.dopts.scChan != nil { + go cc.scWatcher() + } + + var credsClone credentials.TransportCredentials + if creds := cc.dopts.copts.TransportCredentials; creds != nil { + credsClone = creds.Clone() + } + cc.balancerBuildOpts = balancer.BuildOptions{ + DialCreds: credsClone, + Dialer: cc.dopts.copts.Dialer, + ChannelzParentID: cc.channelzID, + } + + // Build the resolver. + cc.resolverWrapper, err = newCCResolverWrapper(cc) + if err != nil { + return nil, fmt.Errorf("failed to build resolver: %v", err) + } + // Start the resolver wrapper goroutine after resolverWrapper is created. + // + // If the goroutine is started before resolverWrapper is ready, the + // following may happen: The goroutine sends updates to cc. cc forwards + // those to balancer. Balancer creates new addrConn. addrConn fails to + // connect, and calls resolveNow(). resolveNow() tries to use the non-ready + // resolverWrapper. + cc.resolverWrapper.start() + + // A blocking dial blocks until the clientConn is ready. + if cc.dopts.block { + for { + s := cc.GetState() + if s == connectivity.Ready { + break + } + if !cc.WaitForStateChange(ctx, s) { + // ctx got timeout or canceled. + return nil, ctx.Err() + } + } + } + + return cc, nil +} + +// connectivityStateManager keeps the connectivity.State of ClientConn. +// This struct will eventually be exported so the balancers can access it. +type connectivityStateManager struct { + mu sync.Mutex + state connectivity.State + notifyChan chan struct{} +} + +// updateState updates the connectivity.State of ClientConn. +// If there's a change it notifies goroutines waiting on state change to +// happen. +func (csm *connectivityStateManager) updateState(state connectivity.State) { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.state == connectivity.Shutdown { + return + } + if csm.state == state { + return + } + csm.state = state + if csm.notifyChan != nil { + // There are other goroutines waiting on this channel. + close(csm.notifyChan) + csm.notifyChan = nil + } +} + +func (csm *connectivityStateManager) getState() connectivity.State { + csm.mu.Lock() + defer csm.mu.Unlock() + return csm.state +} + +func (csm *connectivityStateManager) getNotifyChan() <-chan struct{} { + csm.mu.Lock() + defer csm.mu.Unlock() + if csm.notifyChan == nil { + csm.notifyChan = make(chan struct{}) + } + return csm.notifyChan +} + +// ClientConn represents a client connection to an RPC server. +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + + target string + parsedTarget resolver.Target + authority string + dopts dialOptions + csMgr *connectivityStateManager + + balancerBuildOpts balancer.BuildOptions + resolverWrapper *ccResolverWrapper + blockingpicker *pickerWrapper + + mu sync.RWMutex + sc ServiceConfig + scRaw string + conns map[*addrConn]struct{} + // Keepalive parameter can be updated if a GoAway is received. + mkp keepalive.ClientParameters + curBalancerName string + preBalancerName string // previous balancer name. 
+ curAddresses []resolver.Address + balancerWrapper *ccBalancerWrapper + + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + callsStarted int64 + callsSucceeded int64 + callsFailed int64 + lastCallStartedTime time.Time +} + +// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or +// ctx expires. A true value is returned in former case and false in latter. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) WaitForStateChange(ctx context.Context, sourceState connectivity.State) bool { + ch := cc.csMgr.getNotifyChan() + if cc.csMgr.getState() != sourceState { + return true + } + select { + case <-ctx.Done(): + return false + case <-ch: + return true + } +} + +// GetState returns the connectivity.State of ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) GetState() connectivity.State { + return cc.csMgr.getState() +} + +func (cc *ClientConn) scWatcher() { + for { + select { + case sc, ok := <-cc.dopts.scChan: + if !ok { + return + } + cc.mu.Lock() + // TODO: load balance policy runtime change is ignored. + // We may revist this decision in the future. + cc.sc = sc + cc.scRaw = "" + cc.mu.Unlock() + case <-cc.ctx.Done(): + return + } + } +} + +func (cc *ClientConn) handleResolvedAddrs(addrs []resolver.Address, err error) { + cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { + // cc was closed. + return + } + + if reflect.DeepEqual(cc.curAddresses, addrs) { + return + } + + cc.curAddresses = addrs + + if cc.dopts.balancerBuilder == nil { + // Only look at balancer types and switch balancer if balancer dial + // option is not set. + var isGRPCLB bool + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + isGRPCLB = true + break + } + } + var newBalancerName string + if isGRPCLB { + newBalancerName = grpclbName + } else { + // Address list doesn't contain grpclb address. Try to pick a + // non-grpclb balancer. + newBalancerName = cc.curBalancerName + // If current balancer is grpclb, switch to the previous one. + if newBalancerName == grpclbName { + newBalancerName = cc.preBalancerName + } + // The following could be true in two cases: + // - the first time handling resolved addresses + // (curBalancerName="") + // - the first time handling non-grpclb addresses + // (curBalancerName="grpclb", preBalancerName="") + if newBalancerName == "" { + newBalancerName = PickFirstBalancerName + } + } + cc.switchBalancer(newBalancerName) + } else if cc.balancerWrapper == nil { + // Balancer dial option was set, and this is the first time handling + // resolved addresses. Build a balancer with dopts.balancerBuilder. + cc.balancerWrapper = newCCBalancerWrapper(cc, cc.dopts.balancerBuilder, cc.balancerBuildOpts) + } + + cc.balancerWrapper.handleResolvedAddrs(addrs, nil) +} + +// switchBalancer starts the switching from current balancer to the balancer +// with the given name. +// +// It will NOT send the current address list to the new balancer. If needed, +// caller of this function should send address list to the new balancer after +// this function returns. +// +// Caller must hold cc.mu. 
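The blocking-dial loop inside DialContext is the same pattern an application can run itself after a non-blocking dial, using the GetState and WaitForStateChange methods above (both marked experimental). A minimal sketch, placed in a hypothetical helper package:

package clientutil // hypothetical helper package

import (
	"context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/connectivity"
)

// WaitReady blocks until cc reaches READY or ctx is done, mirroring the
// loop DialContext runs when WithBlock is set.
func WaitReady(ctx context.Context, cc *grpc.ClientConn) error {
	for {
		s := cc.GetState()
		if s == connectivity.Ready {
			return nil
		}
		if !cc.WaitForStateChange(ctx, s) {
			return ctx.Err() // ctx expired or was canceled
		}
	}
}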
+func (cc *ClientConn) switchBalancer(name string) { + if cc.conns == nil { + return + } + + if strings.ToLower(cc.curBalancerName) == strings.ToLower(name) { + return + } + + grpclog.Infof("ClientConn switching balancer to %q", name) + if cc.dopts.balancerBuilder != nil { + grpclog.Infoln("ignoring balancer switching: Balancer DialOption used instead") + return + } + // TODO(bar switching) change this to two steps: drain and close. + // Keep track of sc in wrapper. + if cc.balancerWrapper != nil { + cc.balancerWrapper.close() + } + // Clear all stickiness state. + cc.blockingpicker.clearStickinessState() + + builder := balancer.Get(name) + if builder == nil { + grpclog.Infof("failed to get balancer builder for: %v, using pick_first instead", name) + builder = newPickfirstBuilder() + } + cc.preBalancerName = cc.curBalancerName + cc.curBalancerName = builder.Name() + cc.balancerWrapper = newCCBalancerWrapper(cc, builder, cc.balancerBuildOpts) +} + +func (cc *ClientConn) handleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + // TODO(bar switching) send updates to all balancer wrappers when balancer + // gracefully switching is supported. + cc.balancerWrapper.handleSubConnStateChange(sc, s) + cc.mu.Unlock() +} + +// newAddrConn creates an addrConn for addrs and adds it to cc.conns. +// +// Caller needs to make sure len(addrs) > 0. +func (cc *ClientConn) newAddrConn(addrs []resolver.Address) (*addrConn, error) { + ac := &addrConn{ + cc: cc, + addrs: addrs, + dopts: cc.dopts, + } + ac.ctx, ac.cancel = context.WithCancel(cc.ctx) + // Track ac in cc. This needs to be done before any getTransport(...) is called. + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return nil, ErrClientConnClosing + } + if channelz.IsOn() { + ac.channelzID = channelz.RegisterSubChannel(ac, cc.channelzID, "") + } + cc.conns[ac] = struct{}{} + cc.mu.Unlock() + return ac, nil +} + +// removeAddrConn removes the addrConn in the subConn from clientConn. +// It also tears down the ac with the given error. +func (cc *ClientConn) removeAddrConn(ac *addrConn, err error) { + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return + } + delete(cc.conns, ac) + cc.mu.Unlock() + ac.tearDown(err) +} + +// ChannelzMetric returns ChannelInternalMetric of current ClientConn. +// This is an EXPERIMENTAL API. +func (cc *ClientConn) ChannelzMetric() *channelz.ChannelInternalMetric { + state := cc.GetState() + cc.czmu.RLock() + defer cc.czmu.RUnlock() + return &channelz.ChannelInternalMetric{ + State: state, + Target: cc.target, + CallsStarted: cc.callsStarted, + CallsSucceeded: cc.callsSucceeded, + CallsFailed: cc.callsFailed, + LastCallStartedTimestamp: cc.lastCallStartedTime, + } +} + +func (cc *ClientConn) incrCallsStarted() { + cc.czmu.Lock() + cc.callsStarted++ + // TODO(yuxuanli): will make this a time.Time pointer improve performance? + cc.lastCallStartedTime = time.Now() + cc.czmu.Unlock() +} + +func (cc *ClientConn) incrCallsSucceeded() { + cc.czmu.Lock() + cc.callsSucceeded++ + cc.czmu.Unlock() +} + +func (cc *ClientConn) incrCallsFailed() { + cc.czmu.Lock() + cc.callsFailed++ + cc.czmu.Unlock() +} + +// connect starts to creating transport and also starts the transport monitor +// goroutine for this ac. +// It does nothing if the ac is not IDLE. +// TODO(bar) Move this to the addrConn section. +// This was part of resetAddrConn, keep it here to make the diff look clean. 
+func (ac *addrConn) connect() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.state != connectivity.Idle { + ac.mu.Unlock() + return nil + } + ac.state = connectivity.Connecting + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + ac.mu.Unlock() + + // Start a goroutine connecting to the server asynchronously. + go func() { + if err := ac.resetTransport(); err != nil { + grpclog.Warningf("Failed to dial %s: %v; please retry.", ac.addrs[0].Addr, err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return + } + ac.transportMonitor() + }() + return nil +} + +// tryUpdateAddrs tries to update ac.addrs with the new addresses list. +// +// It checks whether current connected address of ac is in the new addrs list. +// - If true, it updates ac.addrs and returns true. The ac will keep using +// the existing connection. +// - If false, it does nothing and returns false. +func (ac *addrConn) tryUpdateAddrs(addrs []resolver.Address) bool { + ac.mu.Lock() + defer ac.mu.Unlock() + grpclog.Infof("addrConn: tryUpdateAddrs curAddr: %v, addrs: %v", ac.curAddr, addrs) + if ac.state == connectivity.Shutdown { + ac.addrs = addrs + return true + } + + var curAddrFound bool + for _, a := range addrs { + if reflect.DeepEqual(ac.curAddr, a) { + curAddrFound = true + break + } + } + grpclog.Infof("addrConn: tryUpdateAddrs curAddrFound: %v", curAddrFound) + if curAddrFound { + ac.addrs = addrs + ac.reconnectIdx = 0 // Start reconnecting from beginning in the new list. + } + + return curAddrFound +} + +// GetMethodConfig gets the method config of the input method. +// If there's an exact match for input method (i.e. /service/method), we return +// the corresponding MethodConfig. +// If there isn't an exact match for the input method, we look for the default config +// under the service (i.e /service/). If there is a default MethodConfig for +// the service, we return it. +// Otherwise, we return an empty MethodConfig. +func (cc *ClientConn) GetMethodConfig(method string) MethodConfig { + // TODO: Avoid the locking here. + cc.mu.RLock() + defer cc.mu.RUnlock() + m, ok := cc.sc.Methods[method] + if !ok { + i := strings.LastIndex(method, "/") + m = cc.sc.Methods[method[:i+1]] + } + return m +} + +func (cc *ClientConn) getTransport(ctx context.Context, failfast bool) (transport.ClientTransport, func(balancer.DoneInfo), error) { + t, done, err := cc.blockingpicker.pick(ctx, failfast, balancer.PickOptions{}) + if err != nil { + return nil, nil, toRPCErr(err) + } + return t, done, nil +} + +// handleServiceConfig parses the service config string in JSON format to Go native +// struct ServiceConfig, and store both the struct and the JSON string in ClientConn. +func (cc *ClientConn) handleServiceConfig(js string) error { + if cc.dopts.disableServiceConfig { + return nil + } + sc, err := parseServiceConfig(js) + if err != nil { + return err + } + cc.mu.Lock() + cc.scRaw = js + cc.sc = sc + if sc.LB != nil && *sc.LB != grpclbName { // "grpclb" is not a valid balancer option in service config. + if cc.curBalancerName == grpclbName { + // If current balancer is grpclb, there's at least one grpclb + // balancer address in the resolved list. Don't switch the balancer, + // but change the previous balancer name, so if a new resolved + // address list doesn't contain grpclb address, balancer will be + // switched to *sc.LB. 
+ cc.preBalancerName = *sc.LB + } else { + cc.switchBalancer(*sc.LB) + cc.balancerWrapper.handleResolvedAddrs(cc.curAddresses, nil) + } + } + + if envConfigStickinessOn { + var newStickinessMDKey string + if sc.stickinessMetadataKey != nil && *sc.stickinessMetadataKey != "" { + newStickinessMDKey = *sc.stickinessMetadataKey + } + // newStickinessMDKey is "" if one of the following happens: + // - stickinessMetadataKey is set to "" + // - stickinessMetadataKey field doesn't exist in service config + cc.blockingpicker.updateStickinessMDKey(strings.ToLower(newStickinessMDKey)) + } + + cc.mu.Unlock() + return nil +} + +func (cc *ClientConn) resolveNow(o resolver.ResolveNowOption) { + cc.mu.Lock() + r := cc.resolverWrapper + cc.mu.Unlock() + if r == nil { + return + } + go r.resolveNow(o) +} + +// Close tears down the ClientConn and all underlying connections. +func (cc *ClientConn) Close() error { + defer cc.cancel() + + cc.mu.Lock() + if cc.conns == nil { + cc.mu.Unlock() + return ErrClientConnClosing + } + conns := cc.conns + cc.conns = nil + cc.csMgr.updateState(connectivity.Shutdown) + + rWrapper := cc.resolverWrapper + cc.resolverWrapper = nil + bWrapper := cc.balancerWrapper + cc.balancerWrapper = nil + cc.mu.Unlock() + + cc.blockingpicker.close() + + if rWrapper != nil { + rWrapper.close() + } + if bWrapper != nil { + bWrapper.close() + } + + for ac := range conns { + ac.tearDown(ErrClientConnClosing) + } + if channelz.IsOn() { + channelz.RemoveEntry(cc.channelzID) + } + return nil +} + +// addrConn is a network connection to a given address. +type addrConn struct { + ctx context.Context + cancel context.CancelFunc + + cc *ClientConn + addrs []resolver.Address + dopts dialOptions + events trace.EventLog + acbw balancer.SubConn + + mu sync.Mutex + curAddr resolver.Address + reconnectIdx int // The index in addrs list to start reconnecting from. + state connectivity.State + // ready is closed and becomes nil when a new transport is up or failed + // due to timeout. + ready chan struct{} + transport transport.ClientTransport + + // The reason this addrConn is torn down. + tearDownErr error + + connectRetryNum int + // backoffDeadline is the time until which resetTransport needs to + // wait before increasing connectRetryNum count. + backoffDeadline time.Time + // connectDeadline is the time by which all connection + // negotiations must complete. + connectDeadline time.Time + + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + callsStarted int64 + callsSucceeded int64 + callsFailed int64 + lastCallStartedTime time.Time +} + +// adjustParams updates parameters used to create transports upon +// receiving a GoAway. +func (ac *addrConn) adjustParams(r transport.GoAwayReason) { + switch r { + case transport.GoAwayTooManyPings: + v := 2 * ac.dopts.copts.KeepaliveParams.Time + ac.cc.mu.Lock() + if v > ac.cc.mkp.Time { + ac.cc.mkp.Time = v + } + ac.cc.mu.Unlock() + } +} + +// printf records an event in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) printf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Printf(format, a...) + } +} + +// errorf records an error in ac's event log, unless ac has been closed. +// REQUIRES ac.mu is held. +func (ac *addrConn) errorf(format string, a ...interface{}) { + if ac.events != nil { + ac.events.Errorf(format, a...) + } +} + +// resetTransport recreates a transport to the address for ac. 
The old +// transport will close itself on error or when the clientconn is closed. +// The created transport must receive initial settings frame from the server. +// In case that doesn't happen, transportMonitor will kill the newly created +// transport after connectDeadline has expired. +// In case there was an error on the transport before the settings frame was +// received, resetTransport resumes connecting to backends after the one that +// was previously connected to. In case end of the list is reached, resetTransport +// backs off until the original deadline. +// If the DialOption WithWaitForHandshake was set, resetTrasport returns +// successfully only after server settings are received. +// +// TODO(bar) make sure all state transitions are valid. +func (ac *addrConn) resetTransport() error { + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + ac.transport = nil + ridx := ac.reconnectIdx + ac.mu.Unlock() + ac.cc.mu.RLock() + ac.dopts.copts.KeepaliveParams = ac.cc.mkp + ac.cc.mu.RUnlock() + var backoffDeadline, connectDeadline time.Time + for connectRetryNum := 0; ; connectRetryNum++ { + ac.mu.Lock() + if ac.backoffDeadline.IsZero() { + // This means either a successful HTTP2 connection was established + // or this is the first time this addrConn is trying to establish a + // connection. + backoffFor := ac.dopts.bs.backoff(connectRetryNum) // time.Duration. + // This will be the duration that dial gets to finish. + dialDuration := getMinConnectTimeout() + if backoffFor > dialDuration { + // Give dial more time as we keep failing to connect. + dialDuration = backoffFor + } + start := time.Now() + backoffDeadline = start.Add(backoffFor) + connectDeadline = start.Add(dialDuration) + ridx = 0 // Start connecting from the beginning. + } else { + // Continue trying to connect with the same deadlines. + connectRetryNum = ac.connectRetryNum + backoffDeadline = ac.backoffDeadline + connectDeadline = ac.connectDeadline + ac.backoffDeadline = time.Time{} + ac.connectDeadline = time.Time{} + ac.connectRetryNum = 0 + } + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return errConnClosing + } + ac.printf("connecting") + if ac.state != connectivity.Connecting { + ac.state = connectivity.Connecting + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + } + // copy ac.addrs in case of race + addrsIter := make([]resolver.Address, len(ac.addrs)) + copy(addrsIter, ac.addrs) + copts := ac.dopts.copts + ac.mu.Unlock() + connected, err := ac.createTransport(connectRetryNum, ridx, backoffDeadline, connectDeadline, addrsIter, copts) + if err != nil { + return err + } + if connected { + return nil + } + } +} + +// createTransport creates a connection to one of the backends in addrs. +// It returns true if a connection was established. +func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, connectDeadline time.Time, addrs []resolver.Address, copts transport.ConnectOptions) (bool, error) { + for i := ridx; i < len(addrs); i++ { + addr := addrs[i] + target := transport.TargetInfo{ + Addr: addr.Addr, + Metadata: addr.Metadata, + Authority: ac.cc.authority, + } + done := make(chan struct{}) + onPrefaceReceipt := func() { + ac.mu.Lock() + close(done) + if !ac.backoffDeadline.IsZero() { + // If we haven't already started reconnecting to + // other backends. 
+ // Note, this can happen when writer notices an error + // and triggers resetTransport while at the same time + // reader receives the preface and invokes this closure. + ac.backoffDeadline = time.Time{} + ac.connectDeadline = time.Time{} + ac.connectRetryNum = 0 + } + ac.mu.Unlock() + } + // Do not cancel in the success path because of + // this issue in Go1.6: https://github.com/golang/go/issues/15078. + connectCtx, cancel := context.WithDeadline(ac.ctx, connectDeadline) + if channelz.IsOn() { + copts.ChannelzParentID = ac.channelzID + } + newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt) + if err != nil { + cancel() + ac.cc.blockingpicker.updateConnectionError(err) + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + // ac.tearDown(...) has been invoked. + ac.mu.Unlock() + return false, errConnClosing + } + ac.mu.Unlock() + grpclog.Warningf("grpc: addrConn.createTransport failed to connect to %v. Err :%v. Reconnecting...", addr, err) + continue + } + if ac.dopts.waitForHandshake { + select { + case <-done: + case <-connectCtx.Done(): + // Didn't receive server preface, must kill this new transport now. + grpclog.Warningf("grpc: addrConn.createTransport failed to receive server preface before deadline.") + newTr.Close() + break + case <-ac.ctx.Done(): + } + } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + // ac.tearDonn(...) has been invoked. + newTr.Close() + return false, errConnClosing + } + ac.printf("ready") + ac.state = connectivity.Ready + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + ac.transport = newTr + ac.curAddr = addr + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + select { + case <-done: + // If the server has responded back with preface already, + // don't set the reconnect parameters. + default: + ac.connectRetryNum = connectRetryNum + ac.backoffDeadline = backoffDeadline + ac.connectDeadline = connectDeadline + ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list. + } + ac.mu.Unlock() + return true, nil + } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return false, errConnClosing + } + ac.state = connectivity.TransientFailure + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + ac.cc.resolveNow(resolver.ResolveNowOption{}) + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + ac.mu.Unlock() + timer := time.NewTimer(backoffDeadline.Sub(time.Now())) + select { + case <-timer.C: + case <-ac.ctx.Done(): + timer.Stop() + return false, ac.ctx.Err() + } + return false, nil +} + +// Run in a goroutine to track the error in transport and create the +// new transport if an error happens. It returns when the channel is closing. +func (ac *addrConn) transportMonitor() { + for { + var timer *time.Timer + var cdeadline <-chan time.Time + ac.mu.Lock() + t := ac.transport + if !ac.connectDeadline.IsZero() { + timer = time.NewTimer(ac.connectDeadline.Sub(time.Now())) + cdeadline = timer.C + } + ac.mu.Unlock() + // Block until we receive a goaway or an error occurs. + select { + case <-t.GoAway(): + done := t.Error() + cleanup := t.Close + // Since this transport will be orphaned (won't have a transportMonitor) + // we need to launch a goroutine to keep track of clientConn.Close() + // happening since it might not be noticed by any other goroutine for a while. 
+ go func() { + <-done + cleanup() + }() + case <-t.Error(): + // In case this is triggered because clientConn.Close() + // was called, we want to immeditately close the transport + // since no other goroutine might notice it for a while. + t.Close() + case <-cdeadline: + ac.mu.Lock() + // This implies that client received server preface. + if ac.backoffDeadline.IsZero() { + ac.mu.Unlock() + continue + } + ac.mu.Unlock() + timer = nil + // No server preface received until deadline. + // Kill the connection. + grpclog.Warningf("grpc: addrConn.transportMonitor didn't get server preface after waiting. Closing the new transport now.") + t.Close() + } + if timer != nil { + timer.Stop() + } + // If a GoAway happened, regardless of error, adjust our keepalive + // parameters as appropriate. + select { + case <-t.GoAway(): + ac.adjustParams(t.GetGoAwayReason()) + default: + } + ac.mu.Lock() + if ac.state == connectivity.Shutdown { + ac.mu.Unlock() + return + } + // Set connectivity state to TransientFailure before calling + // resetTransport. Transition READY->CONNECTING is not valid. + ac.state = connectivity.TransientFailure + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + ac.cc.resolveNow(resolver.ResolveNowOption{}) + ac.curAddr = resolver.Address{} + ac.mu.Unlock() + if err := ac.resetTransport(); err != nil { + ac.mu.Lock() + ac.printf("transport exiting: %v", err) + ac.mu.Unlock() + grpclog.Warningf("grpc: addrConn.transportMonitor exits due to: %v", err) + if err != errConnClosing { + // Keep this ac in cc.conns, to get the reason it's torn down. + ac.tearDown(err) + } + return + } + } +} + +// wait blocks until i) the new transport is up or ii) ctx is done or iii) ac is closed or +// iv) transport is in connectivity.TransientFailure and there is a balancer/failfast is true. +func (ac *addrConn) wait(ctx context.Context, hasBalancer, failfast bool) (transport.ClientTransport, error) { + for { + ac.mu.Lock() + switch { + case ac.state == connectivity.Shutdown: + if failfast || !hasBalancer { + // RPC is failfast or balancer is nil. This RPC should fail with ac.tearDownErr. + err := ac.tearDownErr + ac.mu.Unlock() + return nil, err + } + ac.mu.Unlock() + return nil, errConnClosing + case ac.state == connectivity.Ready: + ct := ac.transport + ac.mu.Unlock() + return ct, nil + case ac.state == connectivity.TransientFailure: + if failfast || hasBalancer { + ac.mu.Unlock() + return nil, errConnUnavailable + } + } + ready := ac.ready + if ready == nil { + ready = make(chan struct{}) + ac.ready = ready + } + ac.mu.Unlock() + select { + case <-ctx.Done(): + return nil, toRPCErr(ctx.Err()) + // Wait until the new transport is ready or failed. + case <-ready: + } + } +} + +// getReadyTransport returns the transport if ac's state is READY. +// Otherwise it returns nil, false. +// If ac's state is IDLE, it will trigger ac to connect. +func (ac *addrConn) getReadyTransport() (transport.ClientTransport, bool) { + ac.mu.Lock() + if ac.state == connectivity.Ready { + t := ac.transport + ac.mu.Unlock() + return t, true + } + var idle bool + if ac.state == connectivity.Idle { + idle = true + } + ac.mu.Unlock() + // Trigger idle ac to connect. + if idle { + ac.connect() + } + return nil, false +} + +// tearDown starts to tear down the addrConn. +// TODO(zhaoq): Make this synchronous to avoid unbounded memory consumption in +// some edge cases (e.g., the caller opens and closes many addrConn's in a +// tight loop. +// tearDown doesn't remove ac from ac.cc.conns. 
+func (ac *addrConn) tearDown(err error) { + ac.cancel() + ac.mu.Lock() + defer ac.mu.Unlock() + if ac.state == connectivity.Shutdown { + return + } + ac.curAddr = resolver.Address{} + if err == errConnDrain && ac.transport != nil { + // GracefulClose(...) may be executed multiple times when + // i) receiving multiple GoAway frames from the server; or + // ii) there are concurrent name resolver/Balancer triggered + // address removal and GoAway. + ac.transport.GracefulClose() + } + ac.state = connectivity.Shutdown + ac.tearDownErr = err + ac.cc.handleSubConnStateChange(ac.acbw, ac.state) + if ac.events != nil { + ac.events.Finish() + ac.events = nil + } + if ac.ready != nil { + close(ac.ready) + ac.ready = nil + } + if channelz.IsOn() { + channelz.RemoveEntry(ac.channelzID) + } +} + +func (ac *addrConn) getState() connectivity.State { + ac.mu.Lock() + defer ac.mu.Unlock() + return ac.state +} + +func (ac *addrConn) getCurAddr() (ret resolver.Address) { + ac.mu.Lock() + ret = ac.curAddr + ac.mu.Unlock() + return +} + +func (ac *addrConn) ChannelzMetric() *channelz.ChannelInternalMetric { + ac.mu.Lock() + addr := ac.curAddr.Addr + ac.mu.Unlock() + state := ac.getState() + ac.czmu.RLock() + defer ac.czmu.RUnlock() + return &channelz.ChannelInternalMetric{ + State: state, + Target: addr, + CallsStarted: ac.callsStarted, + CallsSucceeded: ac.callsSucceeded, + CallsFailed: ac.callsFailed, + LastCallStartedTimestamp: ac.lastCallStartedTime, + } +} + +func (ac *addrConn) incrCallsStarted() { + ac.czmu.Lock() + ac.callsStarted++ + ac.lastCallStartedTime = time.Now() + ac.czmu.Unlock() +} + +func (ac *addrConn) incrCallsSucceeded() { + ac.czmu.Lock() + ac.callsSucceeded++ + ac.czmu.Unlock() +} + +func (ac *addrConn) incrCallsFailed() { + ac.czmu.Lock() + ac.callsFailed++ + ac.czmu.Unlock() +} + +// ErrClientConnTimeout indicates that the ClientConn cannot establish the +// underlying connections within the specified timeout. +// +// Deprecated: This error is never returned by grpc and should not be +// referenced by users. +var ErrClientConnTimeout = errors.New("grpc: timed out when dialing") diff --git a/vendor/google.golang.org/grpc/codec.go b/vendor/google.golang.org/grpc/codec.go new file mode 100644 index 00000000000..12977654781 --- /dev/null +++ b/vendor/google.golang.org/grpc/codec.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "google.golang.org/grpc/encoding" + _ "google.golang.org/grpc/encoding/proto" // to register the Codec for "proto" +) + +// baseCodec contains the functionality of both Codec and encoding.Codec, but +// omits the name/string, which vary between the two and are not needed for +// anything besides the registry in the encoding package. 
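The Codec interface defined just below is deprecated in favor of encoding.Codec; here is a minimal sketch of that replacement, assuming an illustrative JSON codec in a made-up package, registered from an init function:

package jsoncodec // hypothetical package

import (
	"encoding/json"

	"google.golang.org/grpc/encoding"
)

// codec implements encoding.Codec, the non-deprecated counterpart of the
// Codec interface below; once registered, its name can be selected per
// call (e.g. via grpc.CallContentSubtype("json")).
type codec struct{}

func (codec) Marshal(v interface{}) ([]byte, error)      { return json.Marshal(v) }
func (codec) Unmarshal(data []byte, v interface{}) error { return json.Unmarshal(data, v) }
func (codec) Name() string                               { return "json" }

func init() { encoding.RegisterCodec(codec{}) }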
+type baseCodec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +var _ baseCodec = Codec(nil) +var _ baseCodec = encoding.Codec(nil) + +// Codec defines the interface gRPC uses to encode and decode messages. +// Note that implementations of this interface must be thread safe; +// a Codec's methods can be called from concurrent goroutines. +// +// Deprecated: use encoding.Codec instead. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // String returns the name of the Codec implementation. This is unused by + // gRPC. + String() string +} diff --git a/vendor/google.golang.org/grpc/codegen.sh b/vendor/google.golang.org/grpc/codegen.sh new file mode 100755 index 00000000000..4cdc6ba7c09 --- /dev/null +++ b/vendor/google.golang.org/grpc/codegen.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + +# This script serves as an example to demonstrate how to generate the gRPC-Go +# interface and the related messages from .proto file. +# +# It assumes the installation of i) Google proto buffer compiler at +# https://github.com/google/protobuf (after v2.6.1) and ii) the Go codegen +# plugin at https://github.com/golang/protobuf (after 2015-02-20). If you have +# not, please install them first. +# +# We recommend running this script at $GOPATH/src. +# +# If this is not what you need, feel free to make your own scripts. Again, this +# script is for demonstration purpose. +# +proto=$1 +protoc --go_out=plugins=grpc:. $proto diff --git a/vendor/google.golang.org/grpc/codes/code_string.go b/vendor/google.golang.org/grpc/codes/code_string.go new file mode 100644 index 00000000000..0b206a57822 --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/code_string.go @@ -0,0 +1,62 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package codes + +import "strconv" + +func (c Code) String() string { + switch c { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return "ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "Code(" + strconv.FormatInt(int64(c), 10) + ")" + } +} diff --git a/vendor/google.golang.org/grpc/codes/codes.go b/vendor/google.golang.org/grpc/codes/codes.go new file mode 100644 index 00000000000..a8280ae660d --- /dev/null +++ b/vendor/google.golang.org/grpc/codes/codes.go @@ -0,0 +1,184 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package codes defines the canonical error codes used by gRPC. It is +// consistent across various languages. +package codes // import "google.golang.org/grpc/codes" + +import ( + "fmt" +) + +// A Code is an unsigned 32-bit error code as defined in the gRPC spec. +type Code uint32 + +const ( + // OK is returned on success. + OK Code = 0 + + // Canceled indicates the operation was canceled (typically by the caller). + Canceled Code = 1 + + // Unknown error. An example of where this error may be returned is + // if a Status value received from another address space belongs to + // an error-space that is not known in this address space. Also + // errors raised by APIs that do not return enough error information + // may be converted to this error. + Unknown Code = 2 + + // InvalidArgument indicates client specified an invalid argument. + // Note that this differs from FailedPrecondition. It indicates arguments + // that are problematic regardless of the state of the system + // (e.g., a malformed file name). + InvalidArgument Code = 3 + + // DeadlineExceeded means operation expired before completion. + // For operations that change the state of the system, this error may be + // returned even if the operation has completed successfully. For + // example, a successful response from a server could have been delayed + // long enough for the deadline to expire. + DeadlineExceeded Code = 4 + + // NotFound means some requested entity (e.g., file or directory) was + // not found. + NotFound Code = 5 + + // AlreadyExists means an attempt to create an entity failed because one + // already exists. + AlreadyExists Code = 6 + + // PermissionDenied indicates the caller does not have permission to + // execute the specified operation. 
It must not be used for rejections + // caused by exhausting some resource (use ResourceExhausted + // instead for those errors). It must not be + // used if the caller cannot be identified (use Unauthenticated + // instead for those errors). + PermissionDenied Code = 7 + + // ResourceExhausted indicates some resource has been exhausted, perhaps + // a per-user quota, or perhaps the entire file system is out of space. + ResourceExhausted Code = 8 + + // FailedPrecondition indicates operation was rejected because the + // system is not in a state required for the operation's execution. + // For example, directory to be deleted may be non-empty, an rmdir + // operation is applied to a non-directory, etc. + // + // A litmus test that may help a service implementor in deciding + // between FailedPrecondition, Aborted, and Unavailable: + // (a) Use Unavailable if the client can retry just the failing call. + // (b) Use Aborted if the client should retry at a higher-level + // (e.g., restarting a read-modify-write sequence). + // (c) Use FailedPrecondition if the client should not retry until + // the system state has been explicitly fixed. E.g., if an "rmdir" + // fails because the directory is non-empty, FailedPrecondition + // should be returned since the client should not retry unless + // they have first fixed up the directory by deleting files from it. + // (d) Use FailedPrecondition if the client performs conditional + // REST Get/Update/Delete on a resource and the resource on the + // server does not match the condition. E.g., conflicting + // read-modify-write on the same resource. + FailedPrecondition Code = 9 + + // Aborted indicates the operation was aborted, typically due to a + // concurrency issue like sequencer check failures, transaction aborts, + // etc. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Aborted Code = 10 + + // OutOfRange means operation was attempted past the valid range. + // E.g., seeking or reading past end of file. + // + // Unlike InvalidArgument, this error indicates a problem that may + // be fixed if the system state changes. For example, a 32-bit file + // system will generate InvalidArgument if asked to read at an + // offset that is not in the range [0,2^32-1], but it will generate + // OutOfRange if asked to read from an offset past the current + // file size. + // + // There is a fair bit of overlap between FailedPrecondition and + // OutOfRange. We recommend using OutOfRange (the more specific + // error) when it applies so that callers who are iterating through + // a space can easily look for an OutOfRange error to detect when + // they are done. + OutOfRange Code = 11 + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 12 + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + Internal Code = 13 + + // Unavailable indicates the service is currently unavailable. + // This is a most likely a transient condition and may be corrected + // by retrying with a backoff. + // + // See litmus test above for deciding between FailedPrecondition, + // Aborted, and Unavailable. + Unavailable Code = 14 + + // DataLoss indicates unrecoverable data loss or corruption. + DataLoss Code = 15 + + // Unauthenticated indicates the request does not have valid + // authentication credentials for the operation. 
+ Unauthenticated Code = 16 +) + +var strToCode = map[string]Code{ + `"OK"`: OK, + `"CANCELLED"`:/* [sic] */ Canceled, + `"UNKNOWN"`: Unknown, + `"INVALID_ARGUMENT"`: InvalidArgument, + `"DEADLINE_EXCEEDED"`: DeadlineExceeded, + `"NOT_FOUND"`: NotFound, + `"ALREADY_EXISTS"`: AlreadyExists, + `"PERMISSION_DENIED"`: PermissionDenied, + `"RESOURCE_EXHAUSTED"`: ResourceExhausted, + `"FAILED_PRECONDITION"`: FailedPrecondition, + `"ABORTED"`: Aborted, + `"OUT_OF_RANGE"`: OutOfRange, + `"UNIMPLEMENTED"`: Unimplemented, + `"INTERNAL"`: Internal, + `"UNAVAILABLE"`: Unavailable, + `"DATA_LOSS"`: DataLoss, + `"UNAUTHENTICATED"`: Unauthenticated, +} + +// UnmarshalJSON unmarshals b into the Code. +func (c *Code) UnmarshalJSON(b []byte) error { + // From json.Unmarshaler: By convention, to approximate the behavior of + // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as + // a no-op. + if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) +} diff --git a/vendor/google.golang.org/grpc/connectivity/connectivity.go b/vendor/google.golang.org/grpc/connectivity/connectivity.go new file mode 100644 index 00000000000..568ef5dc68b --- /dev/null +++ b/vendor/google.golang.org/grpc/connectivity/connectivity.go @@ -0,0 +1,72 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package connectivity defines connectivity semantics. +// For details, see https://github.com/grpc/grpc/blob/master/doc/connectivity-semantics-and-api.md. +// All APIs in this package are experimental. +package connectivity + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +// State indicates the state of connectivity. +// It can be the state of a ClientConn or SubConn. +type State int + +func (s State) String() string { + switch s { + case Idle: + return "IDLE" + case Connecting: + return "CONNECTING" + case Ready: + return "READY" + case TransientFailure: + return "TRANSIENT_FAILURE" + case Shutdown: + return "SHUTDOWN" + default: + grpclog.Errorf("unknown connectivity state: %d", s) + return "Invalid-State" + } +} + +const ( + // Idle indicates the ClientConn is idle. + Idle State = iota + // Connecting indicates the ClienConn is connecting. + Connecting + // Ready indicates the ClientConn is ready for work. + Ready + // TransientFailure indicates the ClientConn has seen a failure but expects to recover. + TransientFailure + // Shutdown indicates the ClientConn has started shutting down. + Shutdown +) + +// Reporter reports the connectivity states. +type Reporter interface { + // CurrentState returns the current state of the reporter. + CurrentState() State + // WaitForStateChange blocks until the reporter's state is different from the given state, + // and returns true. 
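The canonical codes defined in the codes package above are normally produced and consumed through the status package rather than compared by hand; the sketch below (the service details are made up) also exercises the quoted upper-case JSON form accepted by Code.UnmarshalJSON.

package main

import (
	"encoding/json"
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	// Producing an error: pick the code per the guidance above, e.g.
	// NotFound for a missing entity.
	err := status.Errorf(codes.NotFound, "user %q does not exist", "alice")

	// Consuming an error: status.Code extracts the Code (OK for nil errors,
	// Unknown for non-status errors).
	if status.Code(err) == codes.NotFound {
		log.Println("treating the error as a cache miss")
	}

	// Code implements json.Unmarshaler using the quoted upper-case names.
	var c codes.Code
	if err := json.Unmarshal([]byte(`"UNAVAILABLE"`), &c); err != nil {
		log.Fatal(err)
	}
	log.Println(c) // "Unavailable", via Code.String
}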
+ // It returns false if <-ctx.Done() can proceed (ctx got timeout or got canceled). + WaitForStateChange(context.Context, State) bool +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go new file mode 100644 index 00000000000..3351bf0ee5f --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials.go @@ -0,0 +1,220 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package credentials implements various credentials supported by gRPC library, +// which encapsulate all the state needed by a client to authenticate with a +// server and make various assertions, e.g., about the client's identity, role, +// or whether it is authorized to make a particular call. +package credentials // import "google.golang.org/grpc/credentials" + +import ( + "crypto/tls" + "crypto/x509" + "errors" + "fmt" + "io/ioutil" + "net" + "strings" + + "golang.org/x/net/context" +) + +// alpnProtoStr are the specified application level protocols for gRPC. +var alpnProtoStr = []string{"h2"} + +// PerRPCCredentials defines the common interface for the credentials which need to +// attach security information to every RPC (e.g., oauth2). +type PerRPCCredentials interface { + // GetRequestMetadata gets the current request metadata, refreshing + // tokens if required. This should be called by the transport layer on + // each request, and the data should be populated in headers or other + // context. If a status code is returned, it will be used as the status + // for the RPC. uri is the URI of the entry point for the request. + // When supported by the underlying implementation, ctx can be used for + // timeout and cancellation. + // TODO(zhaoq): Define the set of the qualified keys instead of leaving + // it as an arbitrary string. + GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) + // RequireTransportSecurity indicates whether the credentials requires + // transport security. + RequireTransportSecurity() bool +} + +// ProtocolInfo provides information regarding the gRPC wire protocol version, +// security protocol, security protocol version in use, server name, etc. +type ProtocolInfo struct { + // ProtocolVersion is the gRPC wire protocol version. + ProtocolVersion string + // SecurityProtocol is the security protocol in use. + SecurityProtocol string + // SecurityVersion is the security protocol version. + SecurityVersion string + // ServerName is the user-configured server name. + ServerName string +} + +// AuthInfo defines the common interface for the auth information the users are interested in. +type AuthInfo interface { + AuthType() string +} + +// ErrConnDispatched indicates that rawConn has been dispatched out of gRPC +// and the caller should not close rawConn. 
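A minimal PerRPCCredentials implementation, assuming a static bearer token (the header name, package name, and token value are placeholders): it is attached with WithPerRPCCredentials, and because RequireTransportSecurity reports true, the checks in DialContext reject it on an insecure connection.

package tokencreds // hypothetical package

import (
	"golang.org/x/net/context"
)

// staticToken sends the same bearer token with every RPC.
type staticToken struct {
	token string
}

// GetRequestMetadata implements credentials.PerRPCCredentials.
func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + s.token}, nil
}

// RequireTransportSecurity keeps the token off insecure connections; see the
// PerRPCCredentials check in DialContext.
func (s staticToken) RequireTransportSecurity() bool { return true }

A client would pass it as grpc.WithPerRPCCredentials(staticToken{token: "…"}) alongside transport credentials.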
+var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gRPC") + +// TransportCredentials defines the common interface for all the live gRPC wire +// protocols and supported transport security protocols (e.g., TLS, SSL). +type TransportCredentials interface { + // ClientHandshake does the authentication handshake specified by the corresponding + // authentication protocol on rawConn for clients. It returns the authenticated + // connection and the corresponding auth information about the connection. + // Implementations must use the provided context to implement timely cancellation. + // gRPC will try to reconnect if the error returned is a temporary error + // (io.EOF, context.DeadlineExceeded or err.Temporary() == true). + // If the returned error is a wrapper error, implementations should make sure that + // the error implements Temporary() to have the correct retry behaviors. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error) + // ServerHandshake does the authentication handshake for servers. It returns + // the authenticated connection and the corresponding auth information about + // the connection. + // + // If the returned net.Conn is closed, it MUST close the net.Conn provided. + ServerHandshake(net.Conn) (net.Conn, AuthInfo, error) + // Info provides the ProtocolInfo of this TransportCredentials. + Info() ProtocolInfo + // Clone makes a copy of this TransportCredentials. + Clone() TransportCredentials + // OverrideServerName overrides the server name used to verify the hostname on the returned certificates from the server. + // gRPC internals also use it to override the virtual hosting name if it is set. + // It must be called before dialing. Currently, this is only used by grpclb. + OverrideServerName(string) error +} + +// TLSInfo contains the auth information for a TLS authenticated connection. +// It implements the AuthInfo interface. +type TLSInfo struct { + State tls.ConnectionState +} + +// AuthType returns the type of TLSInfo as a string. +func (t TLSInfo) AuthType() string { + return "tls" +} + +// tlsCreds is the credentials required for authenticating a connection using TLS. 
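On the server side, the TLSInfo that ServerHandshake attaches to a connection can be read back from an RPC context through the peer package; a sketch under assumed names (the package and logging shape are illustrative):

package authinfo // hypothetical package

import (
	"log"

	"golang.org/x/net/context"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/peer"
)

// logPeerTLS logs the negotiated TLS state of the calling client, if any.
func logPeerTLS(ctx context.Context) {
	p, ok := peer.FromContext(ctx)
	if !ok {
		return
	}
	if tlsInfo, ok := p.AuthInfo.(credentials.TLSInfo); ok {
		state := tlsInfo.State
		log.Printf("peer %v: TLS version 0x%x, %d peer certificate(s)",
			p.Addr, state.Version, len(state.PeerCertificates))
	}
}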
+type tlsCreds struct { + // TLS configuration + config *tls.Config +} + +func (c tlsCreds) Info() ProtocolInfo { + return ProtocolInfo{ + SecurityProtocol: "tls", + SecurityVersion: "1.2", + ServerName: c.config.ServerName, + } +} + +func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (_ net.Conn, _ AuthInfo, err error) { + // use local cfg to avoid clobbering ServerName if using multiple endpoints + cfg := cloneTLSConfig(c.config) + if cfg.ServerName == "" { + colonPos := strings.LastIndex(authority, ":") + if colonPos == -1 { + colonPos = len(authority) + } + cfg.ServerName = authority[:colonPos] + } + conn := tls.Client(rawConn, cfg) + errChannel := make(chan error, 1) + go func() { + errChannel <- conn.Handshake() + }() + select { + case err := <-errChannel: + if err != nil { + return nil, nil, err + } + case <-ctx.Done(): + return nil, nil, ctx.Err() + } + return conn, TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error) { + conn := tls.Server(rawConn, c.config) + if err := conn.Handshake(); err != nil { + return nil, nil, err + } + return conn, TLSInfo{conn.ConnectionState()}, nil +} + +func (c *tlsCreds) Clone() TransportCredentials { + return NewTLS(c.config) +} + +func (c *tlsCreds) OverrideServerName(serverNameOverride string) error { + c.config.ServerName = serverNameOverride + return nil +} + +// NewTLS uses c to construct a TransportCredentials based on TLS. +func NewTLS(c *tls.Config) TransportCredentials { + tc := &tlsCreds{cloneTLSConfig(c)} + tc.config.NextProtos = alpnProtoStr + return tc +} + +// NewClientTLSFromCert constructs TLS credentials from the input certificate for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromCert(cp *x509.CertPool, serverNameOverride string) TransportCredentials { + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}) +} + +// NewClientTLSFromFile constructs TLS credentials from the input certificate file for client. +// serverNameOverride is for testing only. If set to a non empty string, +// it will override the virtual host name of authority (e.g. :authority header field) in requests. +func NewClientTLSFromFile(certFile, serverNameOverride string) (TransportCredentials, error) { + b, err := ioutil.ReadFile(certFile) + if err != nil { + return nil, err + } + cp := x509.NewCertPool() + if !cp.AppendCertsFromPEM(b) { + return nil, fmt.Errorf("credentials: failed to append certificates") + } + return NewTLS(&tls.Config{ServerName: serverNameOverride, RootCAs: cp}), nil +} + +// NewServerTLSFromCert constructs TLS credentials from the input certificate for server. +func NewServerTLSFromCert(cert *tls.Certificate) TransportCredentials { + return NewTLS(&tls.Config{Certificates: []tls.Certificate{*cert}}) +} + +// NewServerTLSFromFile constructs TLS credentials from the input certificate file and key +// file for server. 
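Putting the constructors above together, a hedged sketch of both ends of a TLS-secured connection (all file paths and the port are placeholders): the server loads its certificate and key with NewServerTLSFromFile and passes the result to grpc.Creds, while the client trusts a CA bundle through NewClientTLSFromFile and WithTransportCredentials.

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Server side: certificate + key -> ServerOption.
	serverCreds, err := credentials.NewServerTLSFromFile("/etc/grpc/server.crt", "/etc/grpc/server.key")
	if err != nil {
		log.Fatalf("server credentials: %v", err)
	}
	srv := grpc.NewServer(grpc.Creds(serverCreds))
	lis, err := net.Listen("tcp", "localhost:50051")
	if err != nil {
		log.Fatalf("listen: %v", err)
	}
	go srv.Serve(lis)
	defer srv.Stop()

	// Client side: CA bundle -> DialOption.
	clientCreds, err := credentials.NewClientTLSFromFile("/etc/grpc/ca.crt", "")
	if err != nil {
		log.Fatalf("client credentials: %v", err)
	}
	conn, err := grpc.Dial("localhost:50051", grpc.WithTransportCredentials(clientCreds))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}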
+func NewServerTLSFromFile(certFile, keyFile string) (TransportCredentials, error) { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return nil, err + } + return NewTLS(&tls.Config{Certificates: []tls.Certificate{cert}}), nil +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go new file mode 100644 index 00000000000..60409aac0fb --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go17.go @@ -0,0 +1,60 @@ +// +build go1.7 +// +build !go1.8 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + DynamicRecordSizingDisabled: cfg.DynamicRecordSizingDisabled, + Renegotiation: cfg.Renegotiation, + } +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go new file mode 100644 index 00000000000..93f0e1d8de2 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_go18.go @@ -0,0 +1,38 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. 
+// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + + return cfg.Clone() +} diff --git a/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go new file mode 100644 index 00000000000..d6bbcc9fdd9 --- /dev/null +++ b/vendor/google.golang.org/grpc/credentials/credentials_util_pre_go17.go @@ -0,0 +1,57 @@ +// +build !go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package credentials + +import ( + "crypto/tls" +) + +// cloneTLSConfig returns a shallow clone of the exported +// fields of cfg, ignoring the unexported sync.Once, which +// contains a mutex and must not be copied. +// +// If cfg is nil, a new zero tls.Config is returned. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + SessionTicketsDisabled: cfg.SessionTicketsDisabled, + SessionTicketKey: cfg.SessionTicketKey, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/google.golang.org/grpc/doc.go b/vendor/google.golang.org/grpc/doc.go new file mode 100644 index 00000000000..187adbb117f --- /dev/null +++ b/vendor/google.golang.org/grpc/doc.go @@ -0,0 +1,24 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +/* +Package grpc implements an RPC system called gRPC. + +See grpc.io for more information about gRPC. +*/ +package grpc // import "google.golang.org/grpc" diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go new file mode 100644 index 00000000000..ade8b7cec73 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -0,0 +1,118 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package encoding defines the interface for the compressor and codec, and +// functions to register and retrieve compressors and codecs. +// +// This package is EXPERIMENTAL. +package encoding + +import ( + "io" + "strings" +) + +// Identity specifies the optional encoding for uncompressed streams. +// It is intended for grpc internal use only. +const Identity = "identity" + +// Compressor is used for compressing and decompressing when sending or +// receiving messages. +type Compressor interface { + // Compress writes the data written to wc to w after compressing it. If an + // error occurs while initializing the compressor, that error is returned + // instead. + Compress(w io.Writer) (io.WriteCloser, error) + // Decompress reads data from r, decompresses it, and provides the + // uncompressed data via the returned io.Reader. If an error occurs while + // initializing the decompressor, that error is returned instead. + Decompress(r io.Reader) (io.Reader, error) + // Name is the name of the compression codec and is used to set the content + // coding header. The result must be static; the result cannot change + // between calls. + Name() string +} + +var registeredCompressor = make(map[string]Compressor) + +// RegisterCompressor registers the compressor with gRPC by its name. It can +// be activated when sending an RPC via grpc.UseCompressor(). It will be +// automatically accessed when receiving a message based on the content coding +// header. Servers also use it to send a response with the same encoding as +// the request. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCompressor(c Compressor) { + registeredCompressor[c.Name()] = c +} + +// GetCompressor returns Compressor for the given compressor name. +func GetCompressor(name string) Compressor { + return registeredCompressor[name] +} + +// Codec defines the interface gRPC uses to encode and decode messages. Note +// that implementations of this interface must be thread safe; a Codec's +// methods can be called from concurrent goroutines. +type Codec interface { + // Marshal returns the wire format of v. + Marshal(v interface{}) ([]byte, error) + // Unmarshal parses the wire format into v. + Unmarshal(data []byte, v interface{}) error + // Name returns the name of the Codec implementation. The returned string + // will be used as part of content type in transmission. The result must be + // static; the result cannot change between calls. + Name() string +} + +var registeredCodecs = make(map[string]Codec) + +// RegisterCodec registers the provided Codec for use with all gRPC clients and +// servers. +// +// The Codec will be stored and looked up by result of its Name() method, which +// should match the content-subtype of the encoding handled by the Codec. 
This +// is case-insensitive, and is stored and looked up as lowercase. If the +// result of calling Name() is an empty string, RegisterCodec will panic. See +// Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Compressors are +// registered with the same name, the one registered last will take effect. +func RegisterCodec(codec Codec) { + if codec == nil { + panic("cannot register a nil Codec") + } + contentSubtype := strings.ToLower(codec.Name()) + if contentSubtype == "" { + panic("cannot register Codec with empty string result for String()") + } + registeredCodecs[contentSubtype] = codec +} + +// GetCodec gets a registered Codec by content-subtype, or nil if no Codec is +// registered for the content-subtype. +// +// The content-subtype is expected to be lowercase. +func GetCodec(contentSubtype string) Codec { + return registeredCodecs[contentSubtype] +} diff --git a/vendor/google.golang.org/grpc/encoding/proto/proto.go b/vendor/google.golang.org/grpc/encoding/proto/proto.go new file mode 100644 index 00000000000..66b97a6f692 --- /dev/null +++ b/vendor/google.golang.org/grpc/encoding/proto/proto.go @@ -0,0 +1,110 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package proto defines the protobuf codec. Importing this package will +// register the codec. +package proto + +import ( + "math" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/grpc/encoding" +) + +// Name is the name registered for the proto compressor. +const Name = "proto" + +func init() { + encoding.RegisterCodec(codec{}) +} + +// codec is a Codec implementation with protobuf. It is the default codec for gRPC. 
+type codec struct{} + +type cachedProtoBuffer struct { + lastMarshaledSize uint32 + proto.Buffer +} + +func capToMaxInt32(val int) uint32 { + if val > math.MaxInt32 { + return uint32(math.MaxInt32) + } + return uint32(val) +} + +func marshal(v interface{}, cb *cachedProtoBuffer) ([]byte, error) { + protoMsg := v.(proto.Message) + newSlice := make([]byte, 0, cb.lastMarshaledSize) + + cb.SetBuf(newSlice) + cb.Reset() + if err := cb.Marshal(protoMsg); err != nil { + return nil, err + } + out := cb.Bytes() + cb.lastMarshaledSize = capToMaxInt32(len(out)) + return out, nil +} + +func (codec) Marshal(v interface{}) ([]byte, error) { + if pm, ok := v.(proto.Marshaler); ok { + // object can marshal itself, no need for buffer + return pm.Marshal() + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + out, err := marshal(v, cb) + + // put back buffer and lose the ref to the slice + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return out, err +} + +func (codec) Unmarshal(data []byte, v interface{}) error { + protoMsg := v.(proto.Message) + protoMsg.Reset() + + if pu, ok := protoMsg.(proto.Unmarshaler); ok { + // object can unmarshal itself, no need for buffer + return pu.Unmarshal(data) + } + + cb := protoBufferPool.Get().(*cachedProtoBuffer) + cb.SetBuf(data) + err := cb.Unmarshal(protoMsg) + cb.SetBuf(nil) + protoBufferPool.Put(cb) + return err +} + +func (codec) Name() string { + return Name +} + +var protoBufferPool = &sync.Pool{ + New: func() interface{} { + return &cachedProtoBuffer{ + Buffer: proto.Buffer{}, + lastMarshaledSize: 16, + } + }, +} diff --git a/vendor/google.golang.org/grpc/envconfig.go b/vendor/google.golang.org/grpc/envconfig.go new file mode 100644 index 00000000000..d50178e5171 --- /dev/null +++ b/vendor/google.golang.org/grpc/envconfig.go @@ -0,0 +1,37 @@ +/* + * + * Copyright 2018 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "os" + "strings" +) + +const ( + envConfigPrefix = "GRPC_GO_" + envConfigStickinessStr = envConfigPrefix + "STICKINESS" +) + +var ( + envConfigStickinessOn bool +) + +func init() { + envConfigStickinessOn = strings.EqualFold(os.Getenv(envConfigStickinessStr), "on") +} diff --git a/vendor/google.golang.org/grpc/go16.go b/vendor/google.golang.org/grpc/go16.go new file mode 100644 index 00000000000..535ee9356f3 --- /dev/null +++ b/vendor/google.golang.org/grpc/go16.go @@ -0,0 +1,70 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "io" + "net" + "net/http" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req.Cancel = ctx.Done() + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. +func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/go17.go b/vendor/google.golang.org/grpc/go17.go new file mode 100644 index 00000000000..ec676a93c39 --- /dev/null +++ b/vendor/google.golang.org/grpc/go17.go @@ -0,0 +1,71 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + + netctx "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +func sendHTTPRequest(ctx context.Context, req *http.Request, conn net.Conn) error { + req = req.WithContext(ctx) + if err := req.Write(conn); err != nil { + return fmt.Errorf("failed to write the HTTP request: %v", err) + } + return nil +} + +// toRPCErr converts an error into an error from the status package. 
+func toRPCErr(err error) error { + if err == nil || err == io.EOF { + return err + } + if _, ok := status.FromError(err); ok { + return err + } + switch e := err.(type) { + case transport.StreamError: + return status.Error(e.Code, e.Desc) + case transport.ConnectionError: + return status.Error(codes.Unavailable, e.Desc) + default: + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return status.Error(codes.DeadlineExceeded, err.Error()) + case context.Canceled, netctx.Canceled: + return status.Error(codes.Canceled, err.Error()) + } + } + return status.Error(codes.Unknown, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/grpclb.go b/vendor/google.golang.org/grpc/grpclb.go new file mode 100644 index 00000000000..bc2b4452558 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb.go @@ -0,0 +1,341 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +const ( + lbTokeyKey = "lb-token" + defaultFallbackTimeout = 10 * time.Second + grpclbName = "grpclb" +) + +func convertDuration(d *lbpb.Duration) time.Duration { + if d == nil { + return 0 + } + return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond +} + +// Client API for LoadBalancer service. +// Mostly copied from generated pb.go file. +// To avoid circular dependency. +type loadBalancerClient struct { + cc *ClientConn +} + +func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption) (*balanceLoadClientStream, error) { + desc := &StreamDesc{ + StreamName: "BalanceLoad", + ServerStreams: true, + ClientStreams: true, + } + stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...) + if err != nil { + return nil, err + } + x := &balanceLoadClientStream{stream} + return x, nil +} + +type balanceLoadClientStream struct { + ClientStream +} + +func (x *balanceLoadClientStream) Send(m *lbpb.LoadBalanceRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) { + m := new(lbpb.LoadBalanceResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func init() { + balancer.Register(newLBBuilder()) +} + +// newLBBuilder creates a builder for grpclb. +func newLBBuilder() balancer.Builder { + return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout) +} + +// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given +// fallbackTimeout. If no response is received from the remote balancer within +// fallbackTimeout, the backend addresses from the resolved address list will be +// used. 
+// +// Only call this function when a non-default fallback timeout is needed. +func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder { + return &lbBuilder{ + fallbackTimeout: fallbackTimeout, + } +} + +type lbBuilder struct { + fallbackTimeout time.Duration +} + +func (b *lbBuilder) Name() string { + return grpclbName +} + +func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + // This generates a manual resolver builder with a random scheme. This + // scheme will be used to dial to remote LB, so we can send filtered address + // updates to remote LB ClientConn using this manual resolver. + scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36) + r := &lbManualResolver{scheme: scheme, ccb: cc} + + var target string + targetSplitted := strings.Split(cc.Target(), ":///") + if len(targetSplitted) < 2 { + target = cc.Target() + } else { + target = targetSplitted[1] + } + + lb := &lbBalancer{ + cc: newLBCacheClientConn(cc), + target: target, + opt: opt, + fallbackTimeout: b.fallbackTimeout, + doneCh: make(chan struct{}), + + manualResolver: r, + csEvltr: &connectivityStateEvaluator{}, + subConns: make(map[resolver.Address]balancer.SubConn), + scStates: make(map[balancer.SubConn]connectivity.State), + picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + clientStats: &rpcStats{}, + } + + return lb +} + +type lbBalancer struct { + cc *lbCacheClientConn + target string + opt balancer.BuildOptions + fallbackTimeout time.Duration + doneCh chan struct{} + + // manualResolver is used in the remote LB ClientConn inside grpclb. When + // resolved address updates are received by grpclb, filtered updates will be + // send to remote LB ClientConn through this resolver. + manualResolver *lbManualResolver + // The ClientConn to talk to the remote balancer. + ccRemoteLB *ClientConn + + // Support client side load reporting. Each picker gets a reference to this, + // and will update its content. + clientStats *rpcStats + + mu sync.Mutex // guards everything following. + // The full server list including drops, used to check if the newly received + // serverList contains anything new. Each generate picker will also have + // reference to this list to do the first layer pick. + fullServerList []*lbpb.Server + // All backends addresses, with metadata set to nil. This list contains all + // backend addresses in the same order and with the same duplicates as in + // serverlist. When generating picker, a SubConn slice with the same order + // but with only READY SCs will be gerenated. + backendAddrs []resolver.Address + // Roundrobin functionalities. + csEvltr *connectivityStateEvaluator + state connectivity.State + subConns map[resolver.Address]balancer.SubConn // Used to new/remove SubConn. + scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns. + picker balancer.Picker + // Support fallback to resolved backend addresses if there's no response + // from remote balancer within fallbackTimeout. + fallbackTimerExpired bool + serverListReceived bool + // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set + // when resolved address updates are received, and read in the goroutine + // handling fallback. + resolvedBackendAddrs []resolver.Address +} + +// regeneratePicker takes a snapshot of the balancer, and generates a picker from +// it. 
The picker +// - always returns ErrTransientFailure if the balancer is in TransientFailure, +// - does two layer roundrobin pick otherwise. +// Caller must hold lb.mu. +func (lb *lbBalancer) regeneratePicker() { + if lb.state == connectivity.TransientFailure { + lb.picker = &errPicker{err: balancer.ErrTransientFailure} + return + } + var readySCs []balancer.SubConn + for _, a := range lb.backendAddrs { + if sc, ok := lb.subConns[a]; ok { + if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready { + readySCs = append(readySCs, sc) + } + } + } + + if len(lb.fullServerList) <= 0 { + if len(readySCs) <= 0 { + lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + return + } + lb.picker = &rrPicker{subConns: readySCs} + return + } + lb.picker = &lbPicker{ + serverList: lb.fullServerList, + subConns: readySCs, + stats: lb.clientStats, + } +} + +func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + lb.mu.Lock() + defer lb.mu.Unlock() + + oldS, ok := lb.scStates[sc] + if !ok { + grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + return + } + lb.scStates[sc] = s + switch s { + case connectivity.Idle: + sc.Connect() + case connectivity.Shutdown: + // When an address was removed by resolver, b called RemoveSubConn but + // kept the sc's state in scStates. Remove state for this sc here. + delete(lb.scStates, sc) + } + + oldAggrState := lb.state + lb.state = lb.csEvltr.recordTransition(oldS, s) + + // Regenerate picker when one of the following happens: + // - this sc became ready from not-ready + // - this sc became not-ready from ready + // - the aggregated state of balancer became TransientFailure from non-TransientFailure + // - the aggregated state of balancer became non-TransientFailure from TransientFailure + if (oldS == connectivity.Ready) != (s == connectivity.Ready) || + (lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) { + lb.regeneratePicker() + } + + lb.cc.UpdateBalancerState(lb.state, lb.picker) +} + +// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use +// resolved backends (backends received from resolver, not from remote balancer) +// if no connection to remote balancers was successful. +func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) { + timer := time.NewTimer(fallbackTimeout) + defer timer.Stop() + select { + case <-timer.C: + case <-lb.doneCh: + return + } + lb.mu.Lock() + if lb.serverListReceived { + lb.mu.Unlock() + return + } + lb.fallbackTimerExpired = true + lb.refreshSubConns(lb.resolvedBackendAddrs) + lb.mu.Unlock() +} + +// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB +// clientConn. The remoteLB clientConn will handle creating/removing remoteLB +// connections. 
+func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs) + if len(addrs) <= 0 { + return + } + + var remoteBalancerAddrs, backendAddrs []resolver.Address + for _, a := range addrs { + if a.Type == resolver.GRPCLB { + remoteBalancerAddrs = append(remoteBalancerAddrs, a) + } else { + backendAddrs = append(backendAddrs, a) + } + } + + if lb.ccRemoteLB == nil { + if len(remoteBalancerAddrs) <= 0 { + grpclog.Errorf("grpclb: no remote balancer address is available, should never happen") + return + } + // First time receiving resolved addresses, create a cc to remote + // balancers. + lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName) + // Start the fallback goroutine. + go lb.fallbackToBackendsAfter(lb.fallbackTimeout) + } + + // cc to remote balancers uses lb.manualResolver. Send the updated remote + // balancer addresses to it through manualResolver. + lb.manualResolver.NewAddress(remoteBalancerAddrs) + + lb.mu.Lock() + lb.resolvedBackendAddrs = backendAddrs + // If serverListReceived is true, connection to remote balancer was + // successful and there's no need to do fallback anymore. + // If fallbackTimerExpired is false, fallback hasn't happened yet. + if !lb.serverListReceived && lb.fallbackTimerExpired { + // This means we received a new list of resolved backends, and we are + // still in fallback mode. Need to update the list of backends we are + // using to the new list of backends. + lb.refreshSubConns(lb.resolvedBackendAddrs) + } + lb.mu.Unlock() +} + +func (lb *lbBalancer) Close() { + select { + case <-lb.doneCh: + return + default: + } + close(lb.doneCh) + if lb.ccRemoteLB != nil { + lb.ccRemoteLB.Close() + } + lb.cc.close() +} diff --git a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go new file mode 100644 index 00000000000..b3b32b48e86 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.pb.go @@ -0,0 +1,799 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_lb_v1/messages/messages.proto + +package messages // import "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{0} +} +func (m *Duration) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Duration.Unmarshal(m, b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Duration.Marshal(b, m, deterministic) +} +func (dst *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(dst, src) +} +func (m *Duration) XXX_Size() int { + return xxx_messageInfo_Duration.Size(m) +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{1} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Timestamp.Unmarshal(m, b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Timestamp.Marshal(b, m, deterministic) +} +func (dst *Timestamp) XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(dst, src) +} +func (m *Timestamp) XXX_Size() int { + return xxx_messageInfo_Timestamp.Size(m) +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +type LoadBalanceRequest struct { + // Types that are valid to be assigned to LoadBalanceRequestType: + // *LoadBalanceRequest_InitialRequest + // *LoadBalanceRequest_ClientStats + LoadBalanceRequestType isLoadBalanceRequest_LoadBalanceRequestType `protobuf_oneof:"load_balance_request_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalanceRequest) Reset() { *m = LoadBalanceRequest{} } +func (m *LoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func 
(*LoadBalanceRequest) ProtoMessage() {} +func (*LoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{2} +} +func (m *LoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceRequest.Unmarshal(m, b) +} +func (m *LoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceRequest.Marshal(b, m, deterministic) +} +func (dst *LoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceRequest.Merge(dst, src) +} +func (m *LoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_LoadBalanceRequest.Size(m) +} +func (m *LoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceRequest proto.InternalMessageInfo + +type isLoadBalanceRequest_LoadBalanceRequestType interface { + isLoadBalanceRequest_LoadBalanceRequestType() +} + +type LoadBalanceRequest_InitialRequest struct { + InitialRequest *InitialLoadBalanceRequest `protobuf:"bytes,1,opt,name=initial_request,json=initialRequest,oneof"` +} +type LoadBalanceRequest_ClientStats struct { + ClientStats *ClientStats `protobuf:"bytes,2,opt,name=client_stats,json=clientStats,oneof"` +} + +func (*LoadBalanceRequest_InitialRequest) isLoadBalanceRequest_LoadBalanceRequestType() {} +func (*LoadBalanceRequest_ClientStats) isLoadBalanceRequest_LoadBalanceRequestType() {} + +func (m *LoadBalanceRequest) GetLoadBalanceRequestType() isLoadBalanceRequest_LoadBalanceRequestType { + if m != nil { + return m.LoadBalanceRequestType + } + return nil +} + +func (m *LoadBalanceRequest) GetInitialRequest() *InitialLoadBalanceRequest { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_InitialRequest); ok { + return x.InitialRequest + } + return nil +} + +func (m *LoadBalanceRequest) GetClientStats() *ClientStats { + if x, ok := m.GetLoadBalanceRequestType().(*LoadBalanceRequest_ClientStats); ok { + return x.ClientStats + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LoadBalanceRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceRequest_OneofMarshaler, _LoadBalanceRequest_OneofUnmarshaler, _LoadBalanceRequest_OneofSizer, []interface{}{ + (*LoadBalanceRequest_InitialRequest)(nil), + (*LoadBalanceRequest_ClientStats)(nil), + } +} + +func _LoadBalanceRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialRequest); err != nil { + return err + } + case *LoadBalanceRequest_ClientStats: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClientStats); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceRequest.LoadBalanceRequestType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceRequest) + switch tag { + case 1: // load_balance_request_type.initial_request + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceRequest) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_InitialRequest{msg} + return true, err + case 2: // load_balance_request_type.client_stats + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClientStats) + err := b.DecodeMessage(msg) + m.LoadBalanceRequestType = &LoadBalanceRequest_ClientStats{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceRequest) + // load_balance_request_type + switch x := m.LoadBalanceRequestType.(type) { + case *LoadBalanceRequest_InitialRequest: + s := proto.Size(x.InitialRequest) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceRequest_ClientStats: + s := proto.Size(x.ClientStats) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceRequest struct { + // Name of load balanced service (IE, balancer.service.com) + // length should be less than 256 bytes. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceRequest) Reset() { *m = InitialLoadBalanceRequest{} } +func (m *InitialLoadBalanceRequest) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceRequest) ProtoMessage() {} +func (*InitialLoadBalanceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{3} +} +func (m *InitialLoadBalanceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceRequest.Unmarshal(m, b) +} +func (m *InitialLoadBalanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceRequest.Marshal(b, m, deterministic) +} +func (dst *InitialLoadBalanceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceRequest.Merge(dst, src) +} +func (m *InitialLoadBalanceRequest) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceRequest.Size(m) +} +func (m *InitialLoadBalanceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceRequest proto.InternalMessageInfo + +func (m *InitialLoadBalanceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +type ClientStats struct { + // The timestamp of generating the report. + Timestamp *Timestamp `protobuf:"bytes,1,opt,name=timestamp" json:"timestamp,omitempty"` + // The total number of RPCs that started. + NumCallsStarted int64 `protobuf:"varint,2,opt,name=num_calls_started,json=numCallsStarted" json:"num_calls_started,omitempty"` + // The total number of RPCs that finished. + NumCallsFinished int64 `protobuf:"varint,3,opt,name=num_calls_finished,json=numCallsFinished" json:"num_calls_finished,omitempty"` + // The total number of RPCs that were dropped by the client because of rate + // limiting. + NumCallsFinishedWithDropForRateLimiting int64 `protobuf:"varint,4,opt,name=num_calls_finished_with_drop_for_rate_limiting,json=numCallsFinishedWithDropForRateLimiting" json:"num_calls_finished_with_drop_for_rate_limiting,omitempty"` + // The total number of RPCs that were dropped by the client because of load + // balancing. + NumCallsFinishedWithDropForLoadBalancing int64 `protobuf:"varint,5,opt,name=num_calls_finished_with_drop_for_load_balancing,json=numCallsFinishedWithDropForLoadBalancing" json:"num_calls_finished_with_drop_for_load_balancing,omitempty"` + // The total number of RPCs that failed to reach a server except dropped RPCs. + NumCallsFinishedWithClientFailedToSend int64 `protobuf:"varint,6,opt,name=num_calls_finished_with_client_failed_to_send,json=numCallsFinishedWithClientFailedToSend" json:"num_calls_finished_with_client_failed_to_send,omitempty"` + // The total number of RPCs that finished and are known to have been received + // by a server. 
+ NumCallsFinishedKnownReceived int64 `protobuf:"varint,7,opt,name=num_calls_finished_known_received,json=numCallsFinishedKnownReceived" json:"num_calls_finished_known_received,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ClientStats) Reset() { *m = ClientStats{} } +func (m *ClientStats) String() string { return proto.CompactTextString(m) } +func (*ClientStats) ProtoMessage() {} +func (*ClientStats) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{4} +} +func (m *ClientStats) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ClientStats.Unmarshal(m, b) +} +func (m *ClientStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ClientStats.Marshal(b, m, deterministic) +} +func (dst *ClientStats) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClientStats.Merge(dst, src) +} +func (m *ClientStats) XXX_Size() int { + return xxx_messageInfo_ClientStats.Size(m) +} +func (m *ClientStats) XXX_DiscardUnknown() { + xxx_messageInfo_ClientStats.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientStats proto.InternalMessageInfo + +func (m *ClientStats) GetTimestamp() *Timestamp { + if m != nil { + return m.Timestamp + } + return nil +} + +func (m *ClientStats) GetNumCallsStarted() int64 { + if m != nil { + return m.NumCallsStarted + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinished() int64 { + if m != nil { + return m.NumCallsFinished + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithDropForRateLimiting() int64 { + if m != nil { + return m.NumCallsFinishedWithDropForRateLimiting + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithDropForLoadBalancing() int64 { + if m != nil { + return m.NumCallsFinishedWithDropForLoadBalancing + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedWithClientFailedToSend() int64 { + if m != nil { + return m.NumCallsFinishedWithClientFailedToSend + } + return 0 +} + +func (m *ClientStats) GetNumCallsFinishedKnownReceived() int64 { + if m != nil { + return m.NumCallsFinishedKnownReceived + } + return 0 +} + +type LoadBalanceResponse struct { + // Types that are valid to be assigned to LoadBalanceResponseType: + // *LoadBalanceResponse_InitialResponse + // *LoadBalanceResponse_ServerList + LoadBalanceResponseType isLoadBalanceResponse_LoadBalanceResponseType `protobuf_oneof:"load_balance_response_type"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LoadBalanceResponse) Reset() { *m = LoadBalanceResponse{} } +func (m *LoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*LoadBalanceResponse) ProtoMessage() {} +func (*LoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{5} +} +func (m *LoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LoadBalanceResponse.Unmarshal(m, b) +} +func (m *LoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LoadBalanceResponse.Marshal(b, m, deterministic) +} +func (dst *LoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalanceResponse.Merge(dst, src) +} +func (m *LoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_LoadBalanceResponse.Size(m) +} +func (m *LoadBalanceResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_LoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalanceResponse proto.InternalMessageInfo + +type isLoadBalanceResponse_LoadBalanceResponseType interface { + isLoadBalanceResponse_LoadBalanceResponseType() +} + +type LoadBalanceResponse_InitialResponse struct { + InitialResponse *InitialLoadBalanceResponse `protobuf:"bytes,1,opt,name=initial_response,json=initialResponse,oneof"` +} +type LoadBalanceResponse_ServerList struct { + ServerList *ServerList `protobuf:"bytes,2,opt,name=server_list,json=serverList,oneof"` +} + +func (*LoadBalanceResponse_InitialResponse) isLoadBalanceResponse_LoadBalanceResponseType() {} +func (*LoadBalanceResponse_ServerList) isLoadBalanceResponse_LoadBalanceResponseType() {} + +func (m *LoadBalanceResponse) GetLoadBalanceResponseType() isLoadBalanceResponse_LoadBalanceResponseType { + if m != nil { + return m.LoadBalanceResponseType + } + return nil +} + +func (m *LoadBalanceResponse) GetInitialResponse() *InitialLoadBalanceResponse { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_InitialResponse); ok { + return x.InitialResponse + } + return nil +} + +func (m *LoadBalanceResponse) GetServerList() *ServerList { + if x, ok := m.GetLoadBalanceResponseType().(*LoadBalanceResponse_ServerList); ok { + return x.ServerList + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*LoadBalanceResponse) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LoadBalanceResponse_OneofMarshaler, _LoadBalanceResponse_OneofUnmarshaler, _LoadBalanceResponse_OneofSizer, []interface{}{ + (*LoadBalanceResponse_InitialResponse)(nil), + (*LoadBalanceResponse_ServerList)(nil), + } +} + +func _LoadBalanceResponse_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.InitialResponse); err != nil { + return err + } + case *LoadBalanceResponse_ServerList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ServerList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LoadBalanceResponse.LoadBalanceResponseType has unexpected type %T", x) + } + return nil +} + +func _LoadBalanceResponse_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LoadBalanceResponse) + switch tag { + case 1: // load_balance_response_type.initial_response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(InitialLoadBalanceResponse) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_InitialResponse{msg} + return true, err + case 2: // load_balance_response_type.server_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ServerList) + err := b.DecodeMessage(msg) + m.LoadBalanceResponseType = &LoadBalanceResponse_ServerList{msg} + return true, err + default: + return false, nil + } +} + +func _LoadBalanceResponse_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LoadBalanceResponse) + // load_balance_response_type + switch x := m.LoadBalanceResponseType.(type) { + case *LoadBalanceResponse_InitialResponse: + s := proto.Size(x.InitialResponse) + n += 1 
// tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case *LoadBalanceResponse_ServerList: + s := proto.Size(x.ServerList) + n += 1 // tag and wire + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type InitialLoadBalanceResponse struct { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. + LoadBalancerDelegate string `protobuf:"bytes,1,opt,name=load_balancer_delegate,json=loadBalancerDelegate" json:"load_balancer_delegate,omitempty"` + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. + ClientStatsReportInterval *Duration `protobuf:"bytes,2,opt,name=client_stats_report_interval,json=clientStatsReportInterval" json:"client_stats_report_interval,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitialLoadBalanceResponse) Reset() { *m = InitialLoadBalanceResponse{} } +func (m *InitialLoadBalanceResponse) String() string { return proto.CompactTextString(m) } +func (*InitialLoadBalanceResponse) ProtoMessage() {} +func (*InitialLoadBalanceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{6} +} +func (m *InitialLoadBalanceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitialLoadBalanceResponse.Unmarshal(m, b) +} +func (m *InitialLoadBalanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitialLoadBalanceResponse.Marshal(b, m, deterministic) +} +func (dst *InitialLoadBalanceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitialLoadBalanceResponse.Merge(dst, src) +} +func (m *InitialLoadBalanceResponse) XXX_Size() int { + return xxx_messageInfo_InitialLoadBalanceResponse.Size(m) +} +func (m *InitialLoadBalanceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitialLoadBalanceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitialLoadBalanceResponse proto.InternalMessageInfo + +func (m *InitialLoadBalanceResponse) GetLoadBalancerDelegate() string { + if m != nil { + return m.LoadBalancerDelegate + } + return "" +} + +func (m *InitialLoadBalanceResponse) GetClientStatsReportInterval() *Duration { + if m != nil { + return m.ClientStatsReportInterval + } + return nil +} + +type ServerList struct { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. 
+ Servers []*Server `protobuf:"bytes,1,rep,name=servers" json:"servers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ServerList) Reset() { *m = ServerList{} } +func (m *ServerList) String() string { return proto.CompactTextString(m) } +func (*ServerList) ProtoMessage() {} +func (*ServerList) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{7} +} +func (m *ServerList) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ServerList.Unmarshal(m, b) +} +func (m *ServerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ServerList.Marshal(b, m, deterministic) +} +func (dst *ServerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerList.Merge(dst, src) +} +func (m *ServerList) XXX_Size() int { + return xxx_messageInfo_ServerList.Size(m) +} +func (m *ServerList) XXX_DiscardUnknown() { + xxx_messageInfo_ServerList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerList proto.InternalMessageInfo + +func (m *ServerList) GetServers() []*Server { + if m != nil { + return m.Servers + } + return nil +} + +// Contains server information. When none of the [drop_for_*] fields are true, +// use the other fields. When drop_for_rate_limiting is true, ignore all other +// fields. Use drop_for_load_balancing only when it is true and +// drop_for_rate_limiting is false. +type Server struct { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. + IpAddress []byte `protobuf:"bytes,1,opt,name=ip_address,json=ipAddress,proto3" json:"ip_address,omitempty"` + // A resolved port number for the server. + Port int32 `protobuf:"varint,2,opt,name=port" json:"port,omitempty"` + // An opaque but printable token given to the frontend for each pick. All + // frontend requests for that pick must include the token in its initial + // metadata. The token is used by the backend to verify the request and to + // allow the backend to report load to the gRPC LB system. + // + // Its length is variable but less than 50 bytes. + LoadBalanceToken string `protobuf:"bytes,3,opt,name=load_balance_token,json=loadBalanceToken" json:"load_balance_token,omitempty"` + // Indicates whether this particular request should be dropped by the client + // for rate limiting. + DropForRateLimiting bool `protobuf:"varint,4,opt,name=drop_for_rate_limiting,json=dropForRateLimiting" json:"drop_for_rate_limiting,omitempty"` + // Indicates whether this particular request should be dropped by the client + // for load balancing. 
+ DropForLoadBalancing bool `protobuf:"varint,5,opt,name=drop_for_load_balancing,json=dropForLoadBalancing" json:"drop_for_load_balancing,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Server) Reset() { *m = Server{} } +func (m *Server) String() string { return proto.CompactTextString(m) } +func (*Server) ProtoMessage() {} +func (*Server) Descriptor() ([]byte, []int) { + return fileDescriptor_messages_b81c731f0e83edbd, []int{8} +} +func (m *Server) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Server.Unmarshal(m, b) +} +func (m *Server) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Server.Marshal(b, m, deterministic) +} +func (dst *Server) XXX_Merge(src proto.Message) { + xxx_messageInfo_Server.Merge(dst, src) +} +func (m *Server) XXX_Size() int { + return xxx_messageInfo_Server.Size(m) +} +func (m *Server) XXX_DiscardUnknown() { + xxx_messageInfo_Server.DiscardUnknown(m) +} + +var xxx_messageInfo_Server proto.InternalMessageInfo + +func (m *Server) GetIpAddress() []byte { + if m != nil { + return m.IpAddress + } + return nil +} + +func (m *Server) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *Server) GetLoadBalanceToken() string { + if m != nil { + return m.LoadBalanceToken + } + return "" +} + +func (m *Server) GetDropForRateLimiting() bool { + if m != nil { + return m.DropForRateLimiting + } + return false +} + +func (m *Server) GetDropForLoadBalancing() bool { + if m != nil { + return m.DropForLoadBalancing + } + return false +} + +func init() { + proto.RegisterType((*Duration)(nil), "grpc.lb.v1.Duration") + proto.RegisterType((*Timestamp)(nil), "grpc.lb.v1.Timestamp") + proto.RegisterType((*LoadBalanceRequest)(nil), "grpc.lb.v1.LoadBalanceRequest") + proto.RegisterType((*InitialLoadBalanceRequest)(nil), "grpc.lb.v1.InitialLoadBalanceRequest") + proto.RegisterType((*ClientStats)(nil), "grpc.lb.v1.ClientStats") + proto.RegisterType((*LoadBalanceResponse)(nil), "grpc.lb.v1.LoadBalanceResponse") + proto.RegisterType((*InitialLoadBalanceResponse)(nil), "grpc.lb.v1.InitialLoadBalanceResponse") + proto.RegisterType((*ServerList)(nil), "grpc.lb.v1.ServerList") + proto.RegisterType((*Server)(nil), "grpc.lb.v1.Server") +} + +func init() { + proto.RegisterFile("grpc_lb_v1/messages/messages.proto", fileDescriptor_messages_b81c731f0e83edbd) +} + +var fileDescriptor_messages_b81c731f0e83edbd = []byte{ + // 731 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xdd, 0x4e, 0x1b, 0x39, + 0x14, 0x26, 0x9b, 0x00, 0xc9, 0x09, 0x5a, 0xb2, 0x26, 0x0b, 0x81, 0x05, 0x89, 0x1d, 0x69, 0xd9, + 0x68, 0xc5, 0x4e, 0x04, 0xd9, 0xbd, 0xe8, 0xcf, 0x45, 0x1b, 0x10, 0x0a, 0x2d, 0x17, 0x95, 0x43, + 0x55, 0xa9, 0x52, 0x65, 0x39, 0x19, 0x33, 0x58, 0x38, 0xf6, 0xd4, 0x76, 0x82, 0xfa, 0x08, 0x7d, + 0x94, 0x3e, 0x46, 0xd5, 0x67, 0xe8, 0xfb, 0x54, 0xe3, 0x99, 0xc9, 0x0c, 0x10, 0x40, 0xbd, 0x89, + 0xec, 0xe3, 0xef, 0x7c, 0xdf, 0xf1, 0x89, 0xbf, 0x33, 0xe0, 0x85, 0x3a, 0x1a, 0x11, 0x31, 0x24, + 0xd3, 0x83, 0xce, 0x98, 0x19, 0x43, 0x43, 0x66, 0x66, 0x0b, 0x3f, 0xd2, 0xca, 0x2a, 0x04, 0x31, + 0xc6, 0x17, 0x43, 0x7f, 0x7a, 0xe0, 0x3d, 0x85, 0xea, 0xf1, 0x44, 0x53, 0xcb, 0x95, 0x44, 0x2d, + 0x58, 0x36, 0x6c, 0xa4, 0x64, 0x60, 0x5a, 0xa5, 0xdd, 0x52, 0xbb, 0x8c, 0xb3, 0x2d, 0x6a, 0xc2, + 0xa2, 0xa4, 0x52, 0x99, 0xd6, 0x2f, 0xbb, 0xa5, 0xf6, 0x22, 0x4e, 0x36, 0xde, 0x33, 0xa8, 0x9d, + 0xf3, 0x31, 
0x33, 0x96, 0x8e, 0xa3, 0x9f, 0x4e, 0xfe, 0x5a, 0x02, 0x74, 0xa6, 0x68, 0xd0, 0xa3, + 0x82, 0xca, 0x11, 0xc3, 0xec, 0xe3, 0x84, 0x19, 0x8b, 0xde, 0xc0, 0x2a, 0x97, 0xdc, 0x72, 0x2a, + 0x88, 0x4e, 0x42, 0x8e, 0xae, 0x7e, 0xf8, 0x97, 0x9f, 0x57, 0xed, 0x9f, 0x26, 0x90, 0xbb, 0xf9, + 0xfd, 0x05, 0xfc, 0x6b, 0x9a, 0x9f, 0x31, 0x3e, 0x87, 0x95, 0x91, 0xe0, 0x4c, 0x5a, 0x62, 0x2c, + 0xb5, 0x49, 0x15, 0xf5, 0xc3, 0x8d, 0x22, 0xdd, 0x91, 0x3b, 0x1f, 0xc4, 0xc7, 0xfd, 0x05, 0x5c, + 0x1f, 0xe5, 0xdb, 0xde, 0x1f, 0xb0, 0x29, 0x14, 0x0d, 0xc8, 0x30, 0x91, 0xc9, 0x8a, 0x22, 0xf6, + 0x53, 0xc4, 0xbc, 0x0e, 0x6c, 0xde, 0x5b, 0x09, 0x42, 0x50, 0x91, 0x74, 0xcc, 0x5c, 0xf9, 0x35, + 0xec, 0xd6, 0xde, 0xe7, 0x0a, 0xd4, 0x0b, 0x62, 0xa8, 0x0b, 0x35, 0x9b, 0x75, 0x30, 0xbd, 0xe7, + 0xef, 0xc5, 0xc2, 0x66, 0xed, 0xc5, 0x39, 0x0e, 0xfd, 0x03, 0xbf, 0xc9, 0xc9, 0x98, 0x8c, 0xa8, + 0x10, 0x26, 0xbe, 0x93, 0xb6, 0x2c, 0x70, 0xb7, 0x2a, 0xe3, 0x55, 0x39, 0x19, 0x1f, 0xc5, 0xf1, + 0x41, 0x12, 0x46, 0xfb, 0x80, 0x72, 0xec, 0x05, 0x97, 0xdc, 0x5c, 0xb2, 0xa0, 0x55, 0x76, 0xe0, + 0x46, 0x06, 0x3e, 0x49, 0xe3, 0x88, 0x80, 0x7f, 0x17, 0x4d, 0xae, 0xb9, 0xbd, 0x24, 0x81, 0x56, + 0x11, 0xb9, 0x50, 0x9a, 0x68, 0x6a, 0x19, 0x11, 0x7c, 0xcc, 0x2d, 0x97, 0x61, 0xab, 0xe2, 0x98, + 0xfe, 0xbe, 0xcd, 0xf4, 0x8e, 0xdb, 0xcb, 0x63, 0xad, 0xa2, 0x13, 0xa5, 0x31, 0xb5, 0xec, 0x2c, + 0x85, 0x23, 0x0a, 0x9d, 0x47, 0x05, 0x0a, 0xed, 0x8e, 0x15, 0x16, 0x9d, 0x42, 0xfb, 0x01, 0x85, + 0xbc, 0xf7, 0xb1, 0xc4, 0x07, 0xf8, 0xf7, 0x3e, 0x89, 0xf4, 0x19, 0x5c, 0x50, 0x2e, 0x58, 0x40, + 0xac, 0x22, 0x86, 0xc9, 0xa0, 0xb5, 0xe4, 0x04, 0xf6, 0xe6, 0x09, 0x24, 0x7f, 0xd5, 0x89, 0xc3, + 0x9f, 0xab, 0x01, 0x93, 0x01, 0xea, 0xc3, 0x9f, 0x73, 0xe8, 0xaf, 0xa4, 0xba, 0x96, 0x44, 0xb3, + 0x11, 0xe3, 0x53, 0x16, 0xb4, 0x96, 0x1d, 0xe5, 0xce, 0x6d, 0xca, 0xd7, 0x31, 0x0a, 0xa7, 0x20, + 0xef, 0x5b, 0x09, 0xd6, 0x6e, 0x3c, 0x1b, 0x13, 0x29, 0x69, 0x18, 0x1a, 0x40, 0x23, 0x77, 0x40, + 0x12, 0x4b, 0x9f, 0xc6, 0xde, 0x63, 0x16, 0x48, 0xd0, 0xfd, 0x05, 0xbc, 0x3a, 0xf3, 0x40, 0x4a, + 0xfa, 0x04, 0xea, 0x86, 0xe9, 0x29, 0xd3, 0x44, 0x70, 0x63, 0x53, 0x0f, 0xac, 0x17, 0xf9, 0x06, + 0xee, 0xf8, 0x8c, 0x3b, 0x0f, 0x81, 0x99, 0xed, 0x7a, 0xdb, 0xb0, 0x75, 0xcb, 0x01, 0x09, 0x67, + 0x62, 0x81, 0x2f, 0x25, 0xd8, 0xba, 0xbf, 0x14, 0xf4, 0x1f, 0xac, 0x17, 0x93, 0x35, 0x09, 0x98, + 0x60, 0x21, 0xb5, 0x99, 0x2d, 0x9a, 0x22, 0x4f, 0xd2, 0xc7, 0xe9, 0x19, 0x7a, 0x0b, 0xdb, 0x45, + 0xcb, 0x12, 0xcd, 0x22, 0xa5, 0x2d, 0xe1, 0xd2, 0x32, 0x3d, 0xa5, 0x22, 0x2d, 0xbf, 0x59, 0x2c, + 0x3f, 0x1b, 0x62, 0x78, 0xb3, 0xe0, 0x5e, 0xec, 0xf2, 0x4e, 0xd3, 0x34, 0xef, 0x05, 0x40, 0x7e, + 0x4b, 0xb4, 0x1f, 0x0f, 0xac, 0x78, 0x17, 0x0f, 0xac, 0x72, 0xbb, 0x7e, 0x88, 0xee, 0xb6, 0x03, + 0x67, 0x90, 0x57, 0x95, 0x6a, 0xb9, 0x51, 0xf1, 0xbe, 0x97, 0x60, 0x29, 0x39, 0x41, 0x3b, 0x00, + 0x3c, 0x22, 0x34, 0x08, 0x34, 0x33, 0xc9, 0xc8, 0x5b, 0xc1, 0x35, 0x1e, 0xbd, 0x4c, 0x02, 0xb1, + 0xfb, 0x63, 0xed, 0x74, 0xe6, 0xb9, 0x75, 0x6c, 0xc6, 0x1b, 0x9d, 0xb4, 0xea, 0x8a, 0x49, 0x67, + 0xc6, 0x1a, 0x6e, 0x14, 0x1a, 0x71, 0x1e, 0xc7, 0x51, 0x17, 0xd6, 0x1f, 0x30, 0x5d, 0x15, 0xaf, + 0x05, 0x73, 0x0c, 0xf6, 0x3f, 0x6c, 0x3c, 0x64, 0xa4, 0x2a, 0x6e, 0x06, 0x73, 0x4c, 0xd3, 0xeb, + 0xbe, 0x3f, 0x08, 0x95, 0x0a, 0x05, 0xf3, 0x43, 0x25, 0xa8, 0x0c, 0x7d, 0xa5, 0xc3, 0x4e, 0xdc, + 0x0d, 0xf7, 0x23, 0x86, 0x9d, 0x39, 0x5f, 0x95, 0xe1, 0x92, 0xfb, 0x9a, 0x74, 0x7f, 0x04, 0x00, + 0x00, 0xff, 0xff, 0x8e, 0xd0, 0x70, 0xb7, 0x73, 0x06, 0x00, 0x00, +} diff --git 
a/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto new file mode 100644 index 00000000000..42d99c109fe --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb/grpc_lb_v1/messages/messages.proto @@ -0,0 +1,155 @@ +// Copyright 2016 gRPC authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package grpc.lb.v1; +option go_package = "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"; + +message Duration { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + int64 seconds = 1; + + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + int32 nanos = 2; +} + +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + int32 nanos = 2; +} + +message LoadBalanceRequest { + oneof load_balance_request_type { + // This message should be sent on the first request to the load balancer. + InitialLoadBalanceRequest initial_request = 1; + + // The client stats should be periodically reported to the load balancer + // based on the duration defined in the InitialLoadBalanceResponse. + ClientStats client_stats = 2; + } +} + +message InitialLoadBalanceRequest { + // Name of load balanced service (IE, balancer.service.com) + // length should be less than 256 bytes. + string name = 1; +} + +// Contains client level statistics that are useful to load balancing. Each +// count except the timestamp should be reset to zero after reporting the stats. +message ClientStats { + // The timestamp of generating the report. + Timestamp timestamp = 1; + + // The total number of RPCs that started. + int64 num_calls_started = 2; + + // The total number of RPCs that finished. + int64 num_calls_finished = 3; + + // The total number of RPCs that were dropped by the client because of rate + // limiting. + int64 num_calls_finished_with_drop_for_rate_limiting = 4; + + // The total number of RPCs that were dropped by the client because of load + // balancing. + int64 num_calls_finished_with_drop_for_load_balancing = 5; + + // The total number of RPCs that failed to reach a server except dropped RPCs. 
+ int64 num_calls_finished_with_client_failed_to_send = 6; + + // The total number of RPCs that finished and are known to have been received + // by a server. + int64 num_calls_finished_known_received = 7; +} + +message LoadBalanceResponse { + oneof load_balance_response_type { + // This message should be sent on the first response to the client. + InitialLoadBalanceResponse initial_response = 1; + + // Contains the list of servers selected by the load balancer. The client + // should send requests to these servers in the specified order. + ServerList server_list = 2; + } +} + +message InitialLoadBalanceResponse { + // This is an application layer redirect that indicates the client should use + // the specified server for load balancing. When this field is non-empty in + // the response, the client should open a separate connection to the + // load_balancer_delegate and call the BalanceLoad method. Its length should + // be less than 64 bytes. + string load_balancer_delegate = 1; + + // This interval defines how often the client should send the client stats + // to the load balancer. Stats should only be reported when the duration is + // positive. + Duration client_stats_report_interval = 2; +} + +message ServerList { + // Contains a list of servers selected by the load balancer. The list will + // be updated when server resolutions change or as needed to balance load + // across more servers. The client should consume the server list in order + // unless instructed otherwise via the client_config. + repeated Server servers = 1; + + // Was google.protobuf.Duration expiration_interval. + reserved 3; +} + +// Contains server information. When none of the [drop_for_*] fields are true, +// use the other fields. When drop_for_rate_limiting is true, ignore all other +// fields. Use drop_for_load_balancing only when it is true and +// drop_for_rate_limiting is false. +message Server { + // A resolved address for the server, serialized in network-byte-order. It may + // either be an IPv4 or IPv6 address. + bytes ip_address = 1; + + // A resolved port number for the server. + int32 port = 2; + + // An opaque but printable token given to the frontend for each pick. All + // frontend requests for that pick must include the token in its initial + // metadata. The token is used by the backend to verify the request and to + // allow the backend to report load to the gRPC LB system. + // + // Its length is variable but less than 50 bytes. + string load_balance_token = 3; + + // Indicates whether this particular request should be dropped by the client + // for rate limiting. + bool drop_for_rate_limiting = 4; + + // Indicates whether this particular request should be dropped by the client + // for load balancing. + bool drop_for_load_balancing = 5; +} diff --git a/vendor/google.golang.org/grpc/grpclb_picker.go b/vendor/google.golang.org/grpc/grpclb_picker.go new file mode 100644 index 00000000000..872c7ccea0e --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb_picker.go @@ -0,0 +1,159 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "sync" + "sync/atomic" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/codes" + lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + "google.golang.org/grpc/status" +) + +type rpcStats struct { + NumCallsStarted int64 + NumCallsFinished int64 + NumCallsFinishedWithDropForRateLimiting int64 + NumCallsFinishedWithDropForLoadBalancing int64 + NumCallsFinishedWithClientFailedToSend int64 + NumCallsFinishedKnownReceived int64 +} + +// toClientStats converts rpcStats to lbpb.ClientStats, and clears rpcStats. +func (s *rpcStats) toClientStats() *lbpb.ClientStats { + stats := &lbpb.ClientStats{ + NumCallsStarted: atomic.SwapInt64(&s.NumCallsStarted, 0), + NumCallsFinished: atomic.SwapInt64(&s.NumCallsFinished, 0), + NumCallsFinishedWithDropForRateLimiting: atomic.SwapInt64(&s.NumCallsFinishedWithDropForRateLimiting, 0), + NumCallsFinishedWithDropForLoadBalancing: atomic.SwapInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 0), + NumCallsFinishedWithClientFailedToSend: atomic.SwapInt64(&s.NumCallsFinishedWithClientFailedToSend, 0), + NumCallsFinishedKnownReceived: atomic.SwapInt64(&s.NumCallsFinishedKnownReceived, 0), + } + return stats +} + +func (s *rpcStats) dropForRateLimiting() { + atomic.AddInt64(&s.NumCallsStarted, 1) + atomic.AddInt64(&s.NumCallsFinishedWithDropForRateLimiting, 1) + atomic.AddInt64(&s.NumCallsFinished, 1) +} + +func (s *rpcStats) dropForLoadBalancing() { + atomic.AddInt64(&s.NumCallsStarted, 1) + atomic.AddInt64(&s.NumCallsFinishedWithDropForLoadBalancing, 1) + atomic.AddInt64(&s.NumCallsFinished, 1) +} + +func (s *rpcStats) failedToSend() { + atomic.AddInt64(&s.NumCallsStarted, 1) + atomic.AddInt64(&s.NumCallsFinishedWithClientFailedToSend, 1) + atomic.AddInt64(&s.NumCallsFinished, 1) +} + +func (s *rpcStats) knownReceived() { + atomic.AddInt64(&s.NumCallsStarted, 1) + atomic.AddInt64(&s.NumCallsFinishedKnownReceived, 1) + atomic.AddInt64(&s.NumCallsFinished, 1) +} + +type errPicker struct { + // Pick always returns this err. + err error +} + +func (p *errPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + return nil, nil, p.err +} + +// rrPicker does roundrobin on subConns. It's typically used when there's no +// response from remote balancer, and grpclb falls back to the resolved +// backends. +// +// It guaranteed that len(subConns) > 0. +type rrPicker struct { + mu sync.Mutex + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. + subConnsNext int +} + +func (p *rrPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + p.mu.Lock() + defer p.mu.Unlock() + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + return sc, nil, nil +} + +// lbPicker does two layers of picks: +// +// First layer: roundrobin on all servers in serverList, including drops and backends. +// - If it picks a drop, the RPC will fail as being dropped. +// - If it picks a backend, do a second layer pick to pick the real backend. +// +// Second layer: roundrobin on all READY backends. +// +// It's guaranteed that len(serverList) > 0. +type lbPicker struct { + mu sync.Mutex + serverList []*lbpb.Server + serverListNext int + subConns []balancer.SubConn // The subConns that were READY when taking the snapshot. 
+ subConnsNext int + + stats *rpcStats +} + +func (p *lbPicker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + p.mu.Lock() + defer p.mu.Unlock() + + // Layer one roundrobin on serverList. + s := p.serverList[p.serverListNext] + p.serverListNext = (p.serverListNext + 1) % len(p.serverList) + + // If it's a drop, return an error and fail the RPC. + if s.DropForRateLimiting { + p.stats.dropForRateLimiting() + return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb") + } + if s.DropForLoadBalancing { + p.stats.dropForLoadBalancing() + return nil, nil, status.Errorf(codes.Unavailable, "request dropped by grpclb") + } + + // If not a drop but there's no ready subConns. + if len(p.subConns) <= 0 { + return nil, nil, balancer.ErrNoSubConnAvailable + } + + // Return the next ready subConn in the list, also collect rpc stats. + sc := p.subConns[p.subConnsNext] + p.subConnsNext = (p.subConnsNext + 1) % len(p.subConns) + done := func(info balancer.DoneInfo) { + if !info.BytesSent { + p.stats.failedToSend() + } else if info.BytesReceived { + p.stats.knownReceived() + } + } + return sc, done, nil +} diff --git a/vendor/google.golang.org/grpc/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/grpclb_remote_balancer.go new file mode 100644 index 00000000000..b8dd4f18ce5 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb_remote_balancer.go @@ -0,0 +1,266 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "net" + "reflect" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" + + "google.golang.org/grpc/connectivity" + lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" +) + +// processServerList updates balaner's internal state, create/remove SubConns +// and regenerates picker using the received serverList. +func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { + grpclog.Infof("lbBalancer: processing server list: %+v", l) + lb.mu.Lock() + defer lb.mu.Unlock() + + // Set serverListReceived to true so fallback will not take effect if it has + // not hit timeout. + lb.serverListReceived = true + + // If the new server list == old server list, do nothing. + if reflect.DeepEqual(lb.fullServerList, l.Servers) { + grpclog.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + return + } + lb.fullServerList = l.Servers + + var backendAddrs []resolver.Address + for _, s := range l.Servers { + if s.DropForLoadBalancing || s.DropForRateLimiting { + continue + } + + md := metadata.Pairs(lbTokeyKey, s.LoadBalanceToken) + ip := net.IP(s.IpAddress) + ipStr := ip.String() + if ip.To4() == nil { + // Add square brackets to ipv6 addresses, otherwise net.Dial() and + // net.SplitHostPort() will return too many colons error. 
+ ipStr = fmt.Sprintf("[%s]", ipStr) + } + addr := resolver.Address{ + Addr: fmt.Sprintf("%s:%d", ipStr, s.Port), + Metadata: &md, + } + + backendAddrs = append(backendAddrs, addr) + } + + // Call refreshSubConns to create/remove SubConns. + lb.refreshSubConns(backendAddrs) + // Regenerate and update picker no matter if there's update on backends (if + // any SubConn will be newed/removed). Because since the full serverList was + // different, there might be updates in drops or pick weights(different + // number of duplicates). We need to update picker with the fulllist. + // + // Now with cache, even if SubConn was newed/removed, there might be no + // state changes. + lb.regeneratePicker() + lb.cc.UpdateBalancerState(lb.state, lb.picker) +} + +// refreshSubConns creates/removes SubConns with backendAddrs. It returns a bool +// indicating whether the backendAddrs are different from the cached +// backendAddrs (whether any SubConn was newed/removed). +// Caller must hold lb.mu. +func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address) bool { + lb.backendAddrs = nil + var backendsUpdated bool + // addrsSet is the set converted from backendAddrs, it's used to quick + // lookup for an address. + addrsSet := make(map[resolver.Address]struct{}) + // Create new SubConns. + for _, addr := range backendAddrs { + addrWithoutMD := addr + addrWithoutMD.Metadata = nil + addrsSet[addrWithoutMD] = struct{}{} + lb.backendAddrs = append(lb.backendAddrs, addrWithoutMD) + + if _, ok := lb.subConns[addrWithoutMD]; !ok { + backendsUpdated = true + + // Use addrWithMD to create the SubConn. + sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Warningf("roundrobinBalancer: failed to create new SubConn: %v", err) + continue + } + lb.subConns[addrWithoutMD] = sc // Use the addr without MD as key for the map. + if _, ok := lb.scStates[sc]; !ok { + // Only set state of new sc to IDLE. The state could already be + // READY for cached SubConns. + lb.scStates[sc] = connectivity.Idle + } + sc.Connect() + } + } + + for a, sc := range lb.subConns { + // a was removed by resolver. + if _, ok := addrsSet[a]; !ok { + backendsUpdated = true + + lb.cc.RemoveSubConn(sc) + delete(lb.subConns, a) + // Keep the state of this sc in b.scStates until sc's state becomes Shutdown. + // The entry will be deleted in HandleSubConnStateChange. 
+ } + } + + return backendsUpdated +} + +func (lb *lbBalancer) readServerList(s *balanceLoadClientStream) error { + for { + reply, err := s.Recv() + if err != nil { + return fmt.Errorf("grpclb: failed to recv server list: %v", err) + } + if serverList := reply.GetServerList(); serverList != nil { + lb.processServerList(serverList) + } + } +} + +func (lb *lbBalancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration) { + ticker := time.NewTicker(interval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + case <-s.Context().Done(): + return + } + stats := lb.clientStats.toClientStats() + t := time.Now() + stats.Timestamp = &lbpb.Timestamp{ + Seconds: t.Unix(), + Nanos: int32(t.Nanosecond()), + } + if err := s.Send(&lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{ + ClientStats: stats, + }, + }); err != nil { + return + } + } +} + +func (lb *lbBalancer) callRemoteBalancer() error { + lbClient := &loadBalancerClient{cc: lb.ccRemoteLB} + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + stream, err := lbClient.BalanceLoad(ctx, FailFast(false)) + if err != nil { + return fmt.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err) + } + + // grpclb handshake on the stream. + initReq := &lbpb.LoadBalanceRequest{ + LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{ + InitialRequest: &lbpb.InitialLoadBalanceRequest{ + Name: lb.target, + }, + }, + } + if err := stream.Send(initReq); err != nil { + return fmt.Errorf("grpclb: failed to send init request: %v", err) + } + reply, err := stream.Recv() + if err != nil { + return fmt.Errorf("grpclb: failed to recv init response: %v", err) + } + initResp := reply.GetInitialResponse() + if initResp == nil { + return fmt.Errorf("grpclb: reply from remote balancer did not include initial response") + } + if initResp.LoadBalancerDelegate != "" { + return fmt.Errorf("grpclb: Delegation is not supported") + } + + go func() { + if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 { + lb.sendLoadReport(stream, d) + } + }() + return lb.readServerList(stream) +} + +func (lb *lbBalancer) watchRemoteBalancer() { + for { + err := lb.callRemoteBalancer() + select { + case <-lb.doneCh: + return + default: + if err != nil { + grpclog.Error(err) + } + } + + } +} + +func (lb *lbBalancer) dialRemoteLB(remoteLBName string) { + var dopts []DialOption + if creds := lb.opt.DialCreds; creds != nil { + if err := creds.OverrideServerName(remoteLBName); err == nil { + dopts = append(dopts, WithTransportCredentials(creds)) + } else { + grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v, using Insecure", err) + dopts = append(dopts, WithInsecure()) + } + } else { + dopts = append(dopts, WithInsecure()) + } + if lb.opt.Dialer != nil { + // WithDialer takes a different type of function, so we instead use a + // special DialOption here. + dopts = append(dopts, withContextDialer(lb.opt.Dialer)) + } + // Explicitly set pickfirst as the balancer. + dopts = append(dopts, WithBalancerName(PickFirstBalancerName)) + dopts = append(dopts, withResolverBuilder(lb.manualResolver)) + if channelz.IsOn() { + dopts = append(dopts, WithChannelzParentID(lb.opt.ChannelzParentID)) + } + + // DialContext using manualResolver.Scheme, which is a random scheme generated + // when init grpclb. The target name is not important. + cc, err := DialContext(context.Background(), "grpclb:///grpclb.server", dopts...) 
+ if err != nil { + grpclog.Fatalf("failed to dial: %v", err) + } + lb.ccRemoteLB = cc + go lb.watchRemoteBalancer() +} diff --git a/vendor/google.golang.org/grpc/grpclb_util.go b/vendor/google.golang.org/grpc/grpclb_util.go new file mode 100644 index 00000000000..063ba9d8590 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclb_util.go @@ -0,0 +1,214 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "sync" + "time" + + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/resolver" +) + +// The parent ClientConn should re-resolve when grpclb loses connection to the +// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, +// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's +// ResolveNow, and eventually results in re-resolve happening in parent +// ClientConn's resolver (DNS for example). +// +// parent +// ClientConn +// +-----------------------------------------------------------------+ +// | parent +---------------------------------+ | +// | DNS ClientConn | grpclb | | +// | resolver balancerWrapper | | | +// | + + | grpclb grpclb | | +// | | | | ManualResolver ClientConn | | +// | | | | + + | | +// | | | | | | Transient | | +// | | | | | | Failure | | +// | | | | | <--------- | | | +// | | | <--------------- | ResolveNow | | | +// | | <--------- | ResolveNow | | | | | +// | | ResolveNow | | | | | | +// | | | | | | | | +// | + + | + + | | +// | +---------------------------------+ | +// +-----------------------------------------------------------------+ + +// lbManualResolver is used by the ClientConn inside grpclb. It's a manual +// resolver with a special ResolveNow() function. +// +// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, +// so when grpclb client lose contact with remote balancers, the parent +// ClientConn's resolver will re-resolve. +type lbManualResolver struct { + scheme string + ccr resolver.ClientConn + + ccb balancer.ClientConn +} + +func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOption) (resolver.Resolver, error) { + r.ccr = cc + return r, nil +} + +func (r *lbManualResolver) Scheme() string { + return r.scheme +} + +// ResolveNow calls resolveNow on the parent ClientConn. +func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOption) { + r.ccb.ResolveNow(o) +} + +// Close is a noop for Resolver. +func (*lbManualResolver) Close() {} + +// NewAddress calls cc.NewAddress. +func (r *lbManualResolver) NewAddress(addrs []resolver.Address) { + r.ccr.NewAddress(addrs) +} + +// NewServiceConfig calls cc.NewServiceConfig. +func (r *lbManualResolver) NewServiceConfig(sc string) { + r.ccr.NewServiceConfig(sc) +} + +const subConnCacheTime = time.Second * 10 + +// lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. +// SubConns will be kept in cache for subConnCacheTime before being removed. 
+// +// Its new and remove methods are updated to do cache first. +type lbCacheClientConn struct { + cc balancer.ClientConn + timeout time.Duration + + mu sync.Mutex + // subConnCache only keeps subConns that are being deleted. + subConnCache map[resolver.Address]*subConnCacheEntry + subConnToAddr map[balancer.SubConn]resolver.Address +} + +type subConnCacheEntry struct { + sc balancer.SubConn + + cancel func() + abortDeleting bool +} + +func newLBCacheClientConn(cc balancer.ClientConn) *lbCacheClientConn { + return &lbCacheClientConn{ + cc: cc, + timeout: subConnCacheTime, + subConnCache: make(map[resolver.Address]*subConnCacheEntry), + subConnToAddr: make(map[balancer.SubConn]resolver.Address), + } +} + +func (ccc *lbCacheClientConn) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) { + if len(addrs) != 1 { + return nil, fmt.Errorf("grpclb calling NewSubConn with addrs of length %v", len(addrs)) + } + addrWithoutMD := addrs[0] + addrWithoutMD.Metadata = nil + + ccc.mu.Lock() + defer ccc.mu.Unlock() + if entry, ok := ccc.subConnCache[addrWithoutMD]; ok { + // If entry is in subConnCache, the SubConn was being deleted. + // cancel function will never be nil. + entry.cancel() + delete(ccc.subConnCache, addrWithoutMD) + return entry.sc, nil + } + + scNew, err := ccc.cc.NewSubConn(addrs, opts) + if err != nil { + return nil, err + } + + ccc.subConnToAddr[scNew] = addrWithoutMD + return scNew, nil +} + +func (ccc *lbCacheClientConn) RemoveSubConn(sc balancer.SubConn) { + ccc.mu.Lock() + defer ccc.mu.Unlock() + addr, ok := ccc.subConnToAddr[sc] + if !ok { + return + } + + if entry, ok := ccc.subConnCache[addr]; ok { + if entry.sc != sc { + // This could happen if NewSubConn was called multiple times for the + // same address, and those SubConns are all removed. We remove sc + // immediately here. + delete(ccc.subConnToAddr, sc) + ccc.cc.RemoveSubConn(sc) + } + return + } + + entry := &subConnCacheEntry{ + sc: sc, + } + ccc.subConnCache[addr] = entry + + timer := time.AfterFunc(ccc.timeout, func() { + ccc.mu.Lock() + if entry.abortDeleting { + return + } + ccc.cc.RemoveSubConn(sc) + delete(ccc.subConnToAddr, sc) + delete(ccc.subConnCache, addr) + ccc.mu.Unlock() + }) + entry.cancel = func() { + if !timer.Stop() { + // If stop was not successful, the timer has fired (this can only + // happen in a race). But the deleting function is blocked on ccc.mu + // because the mutex was held by the caller of this function. + // + // Set abortDeleting to true to abort the deleting function. When + // the lock is released, the deleting function will acquire the + // lock, check the value of abortDeleting and return. + entry.abortDeleting = true + } + } +} + +func (ccc *lbCacheClientConn) UpdateBalancerState(s connectivity.State, p balancer.Picker) { + ccc.cc.UpdateBalancerState(s, p) +} + +func (ccc *lbCacheClientConn) close() { + ccc.mu.Lock() + // Only cancel all existing timers. There's no need to remove SubConns. + for _, entry := range ccc.subConnCache { + entry.cancel() + } + ccc.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/grpclog/grpclog.go b/vendor/google.golang.org/grpc/grpclog/grpclog.go new file mode 100644 index 00000000000..1fabb11e1ba --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/grpclog.go @@ -0,0 +1,126 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package grpclog defines logging for grpc. +// +// All logs in transport package only go to verbose level 2. +// All logs in other packages in grpc are logged in spite of the verbosity level. +// +// In the default logger, +// severity level can be set by environment variable GRPC_GO_LOG_SEVERITY_LEVEL, +// verbosity level can be set by GRPC_GO_LOG_VERBOSITY_LEVEL. +package grpclog // import "google.golang.org/grpc/grpclog" + +import "os" + +var logger = newLoggerV2() + +// V reports whether verbosity level l is at least the requested verbose level. +func V(l int) bool { + return logger.V(l) +} + +// Info logs to the INFO log. +func Info(args ...interface{}) { + logger.Info(args...) +} + +// Infof logs to the INFO log. Arguments are handled in the manner of fmt.Printf. +func Infof(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Infoln logs to the INFO log. Arguments are handled in the manner of fmt.Println. +func Infoln(args ...interface{}) { + logger.Infoln(args...) +} + +// Warning logs to the WARNING log. +func Warning(args ...interface{}) { + logger.Warning(args...) +} + +// Warningf logs to the WARNING log. Arguments are handled in the manner of fmt.Printf. +func Warningf(format string, args ...interface{}) { + logger.Warningf(format, args...) +} + +// Warningln logs to the WARNING log. Arguments are handled in the manner of fmt.Println. +func Warningln(args ...interface{}) { + logger.Warningln(args...) +} + +// Error logs to the ERROR log. +func Error(args ...interface{}) { + logger.Error(args...) +} + +// Errorf logs to the ERROR log. Arguments are handled in the manner of fmt.Printf. +func Errorf(format string, args ...interface{}) { + logger.Errorf(format, args...) +} + +// Errorln logs to the ERROR log. Arguments are handled in the manner of fmt.Println. +func Errorln(args ...interface{}) { + logger.Errorln(args...) +} + +// Fatal logs to the FATAL log. Arguments are handled in the manner of fmt.Print. +// It calls os.Exit() with exit code 1. +func Fatal(args ...interface{}) { + logger.Fatal(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalf logs to the FATAL log. Arguments are handled in the manner of fmt.Printf. +// It calles os.Exit() with exit code 1. +func Fatalf(format string, args ...interface{}) { + logger.Fatalf(format, args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Fatalln logs to the FATAL log. Arguments are handled in the manner of fmt.Println. +// It calle os.Exit()) with exit code 1. +func Fatalln(args ...interface{}) { + logger.Fatalln(args...) + // Make sure fatal logs will exit. + os.Exit(1) +} + +// Print prints to the logger. Arguments are handled in the manner of fmt.Print. +// +// Deprecated: use Info. +func Print(args ...interface{}) { + logger.Info(args...) +} + +// Printf prints to the logger. Arguments are handled in the manner of fmt.Printf. +// +// Deprecated: use Infof. +func Printf(format string, args ...interface{}) { + logger.Infof(format, args...) +} + +// Println prints to the logger. Arguments are handled in the manner of fmt.Println. 
+// +// Deprecated: use Infoln. +func Println(args ...interface{}) { + logger.Infoln(args...) +} diff --git a/vendor/google.golang.org/grpc/grpclog/logger.go b/vendor/google.golang.org/grpc/grpclog/logger.go new file mode 100644 index 00000000000..097494f710f --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/logger.go @@ -0,0 +1,85 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpclog + +// Logger mimics golang's standard Logger as an interface. +// +// Deprecated: use LoggerV2. +type Logger interface { + Fatal(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Print(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// SetLogger sets the logger that is used in grpc. Call only from +// init() functions. +// +// Deprecated: use SetLoggerV2. +func SetLogger(l Logger) { + logger = &loggerWrapper{Logger: l} +} + +// loggerWrapper wraps Logger into a LoggerV2. +type loggerWrapper struct { + Logger +} + +func (g *loggerWrapper) Info(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Infoln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Infof(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Warning(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Warningln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Warningf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) Error(args ...interface{}) { + g.Logger.Print(args...) +} + +func (g *loggerWrapper) Errorln(args ...interface{}) { + g.Logger.Println(args...) +} + +func (g *loggerWrapper) Errorf(format string, args ...interface{}) { + g.Logger.Printf(format, args...) +} + +func (g *loggerWrapper) V(l int) bool { + // Returns true for all verbose level. + return true +} diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go new file mode 100644 index 00000000000..d4932577695 --- /dev/null +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -0,0 +1,195 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpclog + +import ( + "io" + "io/ioutil" + "log" + "os" + "strconv" +) + +// LoggerV2 does underlying logging work for grpclog. +type LoggerV2 interface { + // Info logs to INFO log. Arguments are handled in the manner of fmt.Print. + Info(args ...interface{}) + // Infoln logs to INFO log. Arguments are handled in the manner of fmt.Println. + Infoln(args ...interface{}) + // Infof logs to INFO log. Arguments are handled in the manner of fmt.Printf. + Infof(format string, args ...interface{}) + // Warning logs to WARNING log. Arguments are handled in the manner of fmt.Print. + Warning(args ...interface{}) + // Warningln logs to WARNING log. Arguments are handled in the manner of fmt.Println. + Warningln(args ...interface{}) + // Warningf logs to WARNING log. Arguments are handled in the manner of fmt.Printf. + Warningf(format string, args ...interface{}) + // Error logs to ERROR log. Arguments are handled in the manner of fmt.Print. + Error(args ...interface{}) + // Errorln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + Errorln(args ...interface{}) + // Errorf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + Errorf(format string, args ...interface{}) + // Fatal logs to ERROR log. Arguments are handled in the manner of fmt.Print. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatal(args ...interface{}) + // Fatalln logs to ERROR log. Arguments are handled in the manner of fmt.Println. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalln(args ...interface{}) + // Fatalf logs to ERROR log. Arguments are handled in the manner of fmt.Printf. + // gRPC ensures that all Fatal logs will exit with os.Exit(1). + // Implementations may also call os.Exit() with a non-zero exit code. + Fatalf(format string, args ...interface{}) + // V reports whether verbosity level l is at least the requested verbose level. + V(l int) bool +} + +// SetLoggerV2 sets logger that is used in grpc to a V2 logger. +// Not mutex-protected, should be called before any gRPC functions. +func SetLoggerV2(l LoggerV2) { + logger = l +} + +const ( + // infoLog indicates Info severity. + infoLog int = iota + // warningLog indicates Warning severity. + warningLog + // errorLog indicates Error severity. + errorLog + // fatalLog indicates Fatal severity. + fatalLog +) + +// severityName contains the string representation of each severity. +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// loggerT is the default logger used by grpclog. +type loggerT struct { + m []*log.Logger + v int +} + +// NewLoggerV2 creates a loggerV2 with the provided writers. +// Fatal logs will be written to errorW, warningW, infoW, followed by exit(1). +// Error logs will be written to errorW, warningW and infoW. +// Warning logs will be written to warningW and infoW. +// Info logs will be written to infoW. +func NewLoggerV2(infoW, warningW, errorW io.Writer) LoggerV2 { + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, 0) +} + +// NewLoggerV2WithVerbosity creates a loggerV2 with the provided writers and +// verbosity level. 
+func NewLoggerV2WithVerbosity(infoW, warningW, errorW io.Writer, v int) LoggerV2 { + var m []*log.Logger + m = append(m, log.New(infoW, severityName[infoLog]+": ", log.LstdFlags)) + m = append(m, log.New(io.MultiWriter(infoW, warningW), severityName[warningLog]+": ", log.LstdFlags)) + ew := io.MultiWriter(infoW, warningW, errorW) // ew will be used for error and fatal. + m = append(m, log.New(ew, severityName[errorLog]+": ", log.LstdFlags)) + m = append(m, log.New(ew, severityName[fatalLog]+": ", log.LstdFlags)) + return &loggerT{m: m, v: v} +} + +// newLoggerV2 creates a loggerV2 to be used as default logger. +// All logs are written to stderr. +func newLoggerV2() LoggerV2 { + errorW := ioutil.Discard + warningW := ioutil.Discard + infoW := ioutil.Discard + + logLevel := os.Getenv("GRPC_GO_LOG_SEVERITY_LEVEL") + switch logLevel { + case "", "ERROR", "error": // If env is unset, set level to ERROR. + errorW = os.Stderr + case "WARNING", "warning": + warningW = os.Stderr + case "INFO", "info": + infoW = os.Stderr + } + + var v int + vLevel := os.Getenv("GRPC_GO_LOG_VERBOSITY_LEVEL") + if vl, err := strconv.Atoi(vLevel); err == nil { + v = vl + } + return NewLoggerV2WithVerbosity(infoW, warningW, errorW, v) +} + +func (g *loggerT) Info(args ...interface{}) { + g.m[infoLog].Print(args...) +} + +func (g *loggerT) Infoln(args ...interface{}) { + g.m[infoLog].Println(args...) +} + +func (g *loggerT) Infof(format string, args ...interface{}) { + g.m[infoLog].Printf(format, args...) +} + +func (g *loggerT) Warning(args ...interface{}) { + g.m[warningLog].Print(args...) +} + +func (g *loggerT) Warningln(args ...interface{}) { + g.m[warningLog].Println(args...) +} + +func (g *loggerT) Warningf(format string, args ...interface{}) { + g.m[warningLog].Printf(format, args...) +} + +func (g *loggerT) Error(args ...interface{}) { + g.m[errorLog].Print(args...) +} + +func (g *loggerT) Errorln(args ...interface{}) { + g.m[errorLog].Println(args...) +} + +func (g *loggerT) Errorf(format string, args ...interface{}) { + g.m[errorLog].Printf(format, args...) +} + +func (g *loggerT) Fatal(args ...interface{}) { + g.m[fatalLog].Fatal(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalln(args ...interface{}) { + g.m[fatalLog].Fatalln(args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) Fatalf(format string, args ...interface{}) { + g.m[fatalLog].Fatalf(format, args...) + // No need to call os.Exit() again because log.Logger.Fatal() calls os.Exit(). +} + +func (g *loggerT) V(l int) bool { + return l <= g.v +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go new file mode 100644 index 00000000000..e5906de7d43 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go @@ -0,0 +1,227 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: grpc_health_v1/health.proto + +package grpc_health_v1 // import "google.golang.org/grpc/health/grpc_health_v1" + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. 
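For context on the logging hooks vendored above: an application can replace the environment-driven default logger by installing its own LoggerV2 before any other gRPC activity. A minimal sketch, using only NewLoggerV2WithVerbosity and SetLoggerV2 as defined in the grpclog package above; the writer choices and verbosity value are example assumptions, not anything mandated by this change:

package main

import (
	"os"

	"google.golang.org/grpc/grpclog"
)

func init() {
	// Route INFO to stdout and WARNING/ERROR to stderr, with verbosity 2 so
	// transport-level logs (which only appear at verbose level 2) are emitted.
	// SetLoggerV2 is not mutex-protected, so it is installed here in init(),
	// before any gRPC functions run.
	grpclog.SetLoggerV2(grpclog.NewLoggerV2WithVerbosity(os.Stdout, os.Stderr, os.Stderr, 2))
}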
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type HealthCheckResponse_ServingStatus int32 + +const ( + HealthCheckResponse_UNKNOWN HealthCheckResponse_ServingStatus = 0 + HealthCheckResponse_SERVING HealthCheckResponse_ServingStatus = 1 + HealthCheckResponse_NOT_SERVING HealthCheckResponse_ServingStatus = 2 +) + +var HealthCheckResponse_ServingStatus_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SERVING", + 2: "NOT_SERVING", +} +var HealthCheckResponse_ServingStatus_value = map[string]int32{ + "UNKNOWN": 0, + "SERVING": 1, + "NOT_SERVING": 2, +} + +func (x HealthCheckResponse_ServingStatus) String() string { + return proto.EnumName(HealthCheckResponse_ServingStatus_name, int32(x)) +} +func (HealthCheckResponse_ServingStatus) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_health_8e5b8a3074428511, []int{1, 0} +} + +type HealthCheckRequest struct { + Service string `protobuf:"bytes,1,opt,name=service" json:"service,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthCheckRequest) Reset() { *m = HealthCheckRequest{} } +func (m *HealthCheckRequest) String() string { return proto.CompactTextString(m) } +func (*HealthCheckRequest) ProtoMessage() {} +func (*HealthCheckRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_health_8e5b8a3074428511, []int{0} +} +func (m *HealthCheckRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthCheckRequest.Unmarshal(m, b) +} +func (m *HealthCheckRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthCheckRequest.Marshal(b, m, deterministic) +} +func (dst *HealthCheckRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HealthCheckRequest.Merge(dst, src) +} +func (m *HealthCheckRequest) XXX_Size() int { + return xxx_messageInfo_HealthCheckRequest.Size(m) +} +func (m *HealthCheckRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HealthCheckRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthCheckRequest proto.InternalMessageInfo + +func (m *HealthCheckRequest) GetService() string { + if m != nil { + return m.Service + } + return "" +} + +type HealthCheckResponse struct { + Status HealthCheckResponse_ServingStatus `protobuf:"varint,1,opt,name=status,enum=grpc.health.v1.HealthCheckResponse_ServingStatus" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HealthCheckResponse) Reset() { *m = HealthCheckResponse{} } +func (m *HealthCheckResponse) String() string { return proto.CompactTextString(m) } +func (*HealthCheckResponse) ProtoMessage() {} +func (*HealthCheckResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_health_8e5b8a3074428511, []int{1} +} +func (m *HealthCheckResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HealthCheckResponse.Unmarshal(m, b) +} +func (m *HealthCheckResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HealthCheckResponse.Marshal(b, m, deterministic) +} +func (dst *HealthCheckResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_HealthCheckResponse.Merge(dst, src) +} +func (m *HealthCheckResponse) XXX_Size() int { + return xxx_messageInfo_HealthCheckResponse.Size(m) +} +func (m *HealthCheckResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HealthCheckResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HealthCheckResponse proto.InternalMessageInfo + +func (m *HealthCheckResponse) GetStatus() HealthCheckResponse_ServingStatus { + if m != nil { + return m.Status + } + return HealthCheckResponse_UNKNOWN +} + +func init() { + proto.RegisterType((*HealthCheckRequest)(nil), "grpc.health.v1.HealthCheckRequest") + proto.RegisterType((*HealthCheckResponse)(nil), "grpc.health.v1.HealthCheckResponse") + proto.RegisterEnum("grpc.health.v1.HealthCheckResponse_ServingStatus", HealthCheckResponse_ServingStatus_name, HealthCheckResponse_ServingStatus_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Health service + +type HealthClient interface { + Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) +} + +type healthClient struct { + cc *grpc.ClientConn +} + +func NewHealthClient(cc *grpc.ClientConn) HealthClient { + return &healthClient{cc} +} + +func (c *healthClient) Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) { + out := new(HealthCheckResponse) + err := grpc.Invoke(ctx, "/grpc.health.v1.Health/Check", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Health service + +type HealthServer interface { + Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) +} + +func RegisterHealthServer(s *grpc.Server, srv HealthServer) { + s.RegisterService(&_Health_serviceDesc, srv) +} + +func _Health_Check_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthCheckRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HealthServer).Check(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/grpc.health.v1.Health/Check", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HealthServer).Check(ctx, req.(*HealthCheckRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Health_serviceDesc = grpc.ServiceDesc{ + ServiceName: "grpc.health.v1.Health", + HandlerType: (*HealthServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Check", + Handler: _Health_Check_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "grpc_health_v1/health.proto", +} + +func init() { proto.RegisterFile("grpc_health_v1/health.proto", fileDescriptor_health_8e5b8a3074428511) } + +var fileDescriptor_health_8e5b8a3074428511 = []byte{ + // 269 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0x2f, 0x2a, 0x48, + 0x8e, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9, 0x88, 0x2f, 0x33, 0xd4, 0x87, 0xb0, 0xf4, 0x0a, 0x8a, + 0xf2, 0x4b, 0xf2, 0x85, 0xf8, 0x40, 0x92, 0x7a, 0x50, 0xa1, 0x32, 0x43, 0x25, 0x3d, 0x2e, 0x21, + 0x0f, 0x30, 0xc7, 0x39, 0x23, 0x35, 0x39, 0x3b, 0x28, 0xb5, 0xb0, 0x34, 
0xb5, 0xb8, 0x44, 0x48, + 0x82, 0x8b, 0xbd, 0x38, 0xb5, 0xa8, 0x2c, 0x33, 0x39, 0x55, 0x82, 0x51, 0x81, 0x51, 0x83, 0x33, + 0x08, 0xc6, 0x55, 0x9a, 0xc3, 0xc8, 0x25, 0x8c, 0xa2, 0xa1, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, + 0xc8, 0x93, 0x8b, 0xad, 0xb8, 0x24, 0xb1, 0xa4, 0xb4, 0x18, 0xac, 0x81, 0xcf, 0xc8, 0x50, 0x0f, + 0xd5, 0x22, 0x3d, 0x2c, 0x9a, 0xf4, 0x82, 0x41, 0x86, 0xe6, 0xa5, 0x07, 0x83, 0x35, 0x06, 0x41, + 0x0d, 0x50, 0xb2, 0xe2, 0xe2, 0x45, 0x91, 0x10, 0xe2, 0xe6, 0x62, 0x0f, 0xf5, 0xf3, 0xf6, 0xf3, + 0x0f, 0xf7, 0x13, 0x60, 0x00, 0x71, 0x82, 0x5d, 0x83, 0xc2, 0x3c, 0xfd, 0xdc, 0x05, 0x18, 0x85, + 0xf8, 0xb9, 0xb8, 0xfd, 0xfc, 0x43, 0xe2, 0x61, 0x02, 0x4c, 0x46, 0x51, 0x5c, 0x6c, 0x10, 0x8b, + 0x84, 0x02, 0xb8, 0x58, 0xc1, 0x96, 0x09, 0x29, 0xe1, 0x75, 0x09, 0xd8, 0xbf, 0x52, 0xca, 0x44, + 0xb8, 0xd6, 0x29, 0x91, 0x4b, 0x30, 0x33, 0x1f, 0x4d, 0xa1, 0x13, 0x37, 0x44, 0x65, 0x00, 0x28, + 0x70, 0x03, 0x18, 0xa3, 0x74, 0xd2, 0xf3, 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0xd2, 0xf3, 0x73, 0x12, + 0xf3, 0xd2, 0xf5, 0xf2, 0x8b, 0xd2, 0xf5, 0x41, 0x1a, 0xa0, 0x71, 0xa0, 0x8f, 0x1a, 0x33, 0xab, + 0x98, 0xf8, 0xdc, 0x41, 0xa6, 0x41, 0x8c, 0xd0, 0x0b, 0x33, 0x4c, 0x62, 0x03, 0x47, 0x92, 0x31, + 0x20, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x70, 0xc4, 0xa7, 0xc3, 0x01, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto new file mode 100644 index 00000000000..bcc02f8ac83 --- /dev/null +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.proto @@ -0,0 +1,44 @@ +// Copyright 2015, gRPC Authors +// All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The canonical version of this proto can be found at +// https://github.com/grpc/grpc-proto/blob/master/grpc/health/v1/health.proto + +syntax = "proto3"; + +package grpc.health.v1; + +option csharp_namespace = "Grpc.Health.V1"; +option go_package = "google.golang.org/grpc/health/grpc_health_v1"; +option java_multiple_files = true; +option java_outer_classname = "HealthProto"; +option java_package = "io.grpc.health.v1"; + +message HealthCheckRequest { + string service = 1; +} + +message HealthCheckResponse { + enum ServingStatus { + UNKNOWN = 0; + SERVING = 1; + NOT_SERVING = 2; + } + ServingStatus status = 1; +} + +service Health { + rpc Check(HealthCheckRequest) returns (HealthCheckResponse); +} diff --git a/vendor/google.golang.org/grpc/interceptor.go b/vendor/google.golang.org/grpc/interceptor.go new file mode 100644 index 00000000000..1f6ef678035 --- /dev/null +++ b/vendor/google.golang.org/grpc/interceptor.go @@ -0,0 +1,77 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
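The generated health service above is consumed like any other gRPC API. A minimal client-side sketch, assuming a reachable server at a placeholder address; apart from grpc.Dial and grpc.WithInsecure, every call below is defined in the vendored grpc_health_v1 code shown above:

package main

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

func main() {
	conn, err := grpc.Dial("localhost:50051", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// An empty Service name conventionally asks about the server as a whole;
	// a fully qualified service name checks one registered service.
	resp, err := healthpb.NewHealthClient(conn).Check(ctx, &healthpb.HealthCheckRequest{Service: ""})
	if err != nil {
		log.Fatalf("health check RPC failed: %v", err)
	}
	if resp.Status != healthpb.HealthCheckResponse_SERVING {
		log.Fatalf("server is not serving: %v", resp.Status)
	}
}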
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" +) + +// UnaryInvoker is called by UnaryClientInterceptor to complete RPCs. +type UnaryInvoker func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, opts ...CallOption) error + +// UnaryClientInterceptor intercepts the execution of a unary RPC on the client. invoker is the handler to complete the RPC +// and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type UnaryClientInterceptor func(ctx context.Context, method string, req, reply interface{}, cc *ClientConn, invoker UnaryInvoker, opts ...CallOption) error + +// Streamer is called by StreamClientInterceptor to create a ClientStream. +type Streamer func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) + +// StreamClientInterceptor intercepts the creation of ClientStream. It may return a custom ClientStream to intercept all I/O +// operations. streamer is the handler to create a ClientStream and it is the responsibility of the interceptor to call it. +// This is an EXPERIMENTAL API. +type StreamClientInterceptor func(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, streamer Streamer, opts ...CallOption) (ClientStream, error) + +// UnaryServerInfo consists of various information about a unary RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type UnaryServerInfo struct { + // Server is the service implementation the user provides. This is read-only. + Server interface{} + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string +} + +// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal +// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the +// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as +// the status message of the RPC. +type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error) + +// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info +// contains all the information of this RPC the interceptor can operate on. And handler is the wrapper +// of the service method implementation. It is the responsibility of the interceptor to invoke handler +// to complete the RPC. +type UnaryServerInterceptor func(ctx context.Context, req interface{}, info *UnaryServerInfo, handler UnaryHandler) (resp interface{}, err error) + +// StreamServerInfo consists of various information about a streaming RPC on +// server side. All per-rpc information may be mutated by the interceptor. +type StreamServerInfo struct { + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. 
+ IsServerStream bool +} + +// StreamServerInterceptor provides a hook to intercept the execution of a streaming RPC on the server. +// info contains all the information of this RPC the interceptor can operate on. And handler is the +// service method implementation. It is the responsibility of the interceptor to invoke handler to +// complete the RPC. +type StreamServerInterceptor func(srv interface{}, ss ServerStream, info *StreamServerInfo, handler StreamHandler) error diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go new file mode 100644 index 00000000000..53f1775201c --- /dev/null +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -0,0 +1,27 @@ +/* + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package internal contains gRPC-internal code for testing, to avoid polluting +// the godoc of the top-level grpc package. +package internal + +// TestingUseHandlerImpl enables the http.Handler-based server implementation. +// It must be called before Serve and requires TLS credentials. +// +// The provided grpcServer must be of type *grpc.Server. It is untyped +// for circular dependency reasons. +var TestingUseHandlerImpl func(grpcServer interface{}) diff --git a/vendor/google.golang.org/grpc/keepalive/keepalive.go b/vendor/google.golang.org/grpc/keepalive/keepalive.go new file mode 100644 index 00000000000..f8adc7e6d4f --- /dev/null +++ b/vendor/google.golang.org/grpc/keepalive/keepalive.go @@ -0,0 +1,65 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package keepalive defines configurable parameters for point-to-point healthcheck. +package keepalive + +import ( + "time" +) + +// ClientParameters is used to set keepalive parameters on the client-side. +// These configure how the client will actively probe to notice when a connection is broken +// and send pings so intermediaries will be aware of the liveness of the connection. +// Make sure these parameters are set in coordination with the keepalive policy on the server, +// as incompatible settings can result in closing of connection. +type ClientParameters struct { + // After a duration of this time if the client doesn't see any activity it pings the server to see if the transport is still alive. + Time time.Duration // The current default value is infinity. 
+ // After having pinged for keepalive check, the client waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. + Timeout time.Duration // The current default value is 20 seconds. + // If true, client runs keepalive checks even with no active RPCs. + PermitWithoutStream bool // false by default. +} + +// ServerParameters is used to set keepalive and max-age parameters on the server-side. +type ServerParameters struct { + // MaxConnectionIdle is a duration for the amount of time after which an idle connection would be closed by sending a GoAway. + // Idleness duration is defined since the most recent time the number of outstanding RPCs became zero or the connection establishment. + MaxConnectionIdle time.Duration // The current default value is infinity. + // MaxConnectionAge is a duration for the maximum amount of time a connection may exist before it will be closed by sending a GoAway. + // A random jitter of +/-10% will be added to MaxConnectionAge to spread out connection storms. + MaxConnectionAge time.Duration // The current default value is infinity. + // MaxConnectinoAgeGrace is an additive period after MaxConnectionAge after which the connection will be forcibly closed. + MaxConnectionAgeGrace time.Duration // The current default value is infinity. + // After a duration of this time if the server doesn't see any activity it pings the client to see if the transport is still alive. + Time time.Duration // The current default value is 2 hours. + // After having pinged for keepalive check, the server waits for a duration of Timeout and if no activity is seen even after that + // the connection is closed. + Timeout time.Duration // The current default value is 20 seconds. +} + +// EnforcementPolicy is used to set keepalive enforcement policy on the server-side. +// Server will close connection with a client that violates this policy. +type EnforcementPolicy struct { + // MinTime is the minimum amount of time a client should wait before sending a keepalive ping. + MinTime time.Duration // The current default value is 5 minutes. + // If true, server expects keepalive pings even when there are no active streams(RPCs). + PermitWithoutStream bool // false by default. +} diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go new file mode 100644 index 00000000000..bd2eaf40837 --- /dev/null +++ b/vendor/google.golang.org/grpc/metadata/metadata.go @@ -0,0 +1,210 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package metadata define the structure of the metadata supported by gRPC library. +// Please refer to https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md +// for more information about custom-metadata. +package metadata // import "google.golang.org/grpc/metadata" + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" +) + +// DecodeKeyValue returns k, v, nil. 
+// +// Deprecated: use k and v directly instead. +func DecodeKeyValue(k, v string) (string, string, error) { + return k, v, nil +} + +// MD is a mapping from metadata keys to values. Users should use the following +// two convenience functions New and Pairs to generate MD. +type MD map[string][]string + +// New creates an MD from a given key-value map. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func New(m map[string]string) MD { + md := MD{} + for k, val := range m { + key := strings.ToLower(k) + md[key] = append(md[key], val) + } + return md +} + +// Pairs returns an MD formed by the mapping of key, value ... +// Pairs panics if len(kv) is odd. +// +// Only the following ASCII characters are allowed in keys: +// - digits: 0-9 +// - uppercase letters: A-Z (normalized to lower) +// - lowercase letters: a-z +// - special characters: -_. +// Uppercase letters are automatically converted to lowercase. +// +// Keys beginning with "grpc-" are reserved for grpc-internal use only and may +// result in errors if set in metadata. +func Pairs(kv ...string) MD { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: Pairs got the odd number of input pairs for metadata: %d", len(kv))) + } + md := MD{} + var key string + for i, s := range kv { + if i%2 == 0 { + key = strings.ToLower(s) + continue + } + md[key] = append(md[key], s) + } + return md +} + +// Len returns the number of items in md. +func (md MD) Len() int { + return len(md) +} + +// Copy returns a copy of md. +func (md MD) Copy() MD { + return Join(md) +} + +// Get obtains the values for a given key. +func (md MD) Get(k string) []string { + k = strings.ToLower(k) + return md[k] +} + +// Set sets the value of a given key with a slice of values. +func (md MD) Set(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = vals +} + +// Append adds the values to key k, not overwriting what was already stored at that key. +func (md MD) Append(k string, vals ...string) { + if len(vals) == 0 { + return + } + k = strings.ToLower(k) + md[k] = append(md[k], vals...) +} + +// Join joins any number of mds into a single MD. +// The order of values for each key is determined by the order in which +// the mds containing those values are presented to Join. +func Join(mds ...MD) MD { + out := MD{} + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) + } + } + return out +} + +type mdIncomingKey struct{} +type mdOutgoingKey struct{} + +// NewIncomingContext creates a new context with incoming md attached. +func NewIncomingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdIncomingKey{}, md) +} + +// NewOutgoingContext creates a new context with outgoing md attached. If used +// in conjunction with AppendToOutgoingContext, NewOutgoingContext will +// overwrite any previously-appended metadata. +func NewOutgoingContext(ctx context.Context, md MD) context.Context { + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md}) +} + +// AppendToOutgoingContext returns a new context with the provided kv merged +// with any existing metadata in the context. 
Please refer to the +// documentation of Pairs for a description of kv. +func AppendToOutgoingContext(ctx context.Context, kv ...string) context.Context { + if len(kv)%2 == 1 { + panic(fmt.Sprintf("metadata: AppendToOutgoingContext got an odd number of input pairs for metadata: %d", len(kv))) + } + md, _ := ctx.Value(mdOutgoingKey{}).(rawMD) + added := make([][]string, len(md.added)+1) + copy(added, md.added) + added[len(added)-1] = make([]string, len(kv)) + copy(added[len(added)-1], kv) + return context.WithValue(ctx, mdOutgoingKey{}, rawMD{md: md.md, added: added}) +} + +// FromIncomingContext returns the incoming metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromIncomingContext(ctx context.Context) (md MD, ok bool) { + md, ok = ctx.Value(mdIncomingKey{}).(MD) + return +} + +// FromOutgoingContextRaw returns the un-merged, intermediary contents +// of rawMD. Remember to perform strings.ToLower on the keys. The returned +// MD should not be modified. Writing to it may cause races. Modification +// should be made to copies of the returned MD. +// +// This is intended for gRPC-internal use ONLY. +func FromOutgoingContextRaw(ctx context.Context) (MD, [][]string, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, nil, false + } + + return raw.md, raw.added, true +} + +// FromOutgoingContext returns the outgoing metadata in ctx if it exists. The +// returned MD should not be modified. Writing to it may cause races. +// Modification should be made to copies of the returned MD. +func FromOutgoingContext(ctx context.Context) (MD, bool) { + raw, ok := ctx.Value(mdOutgoingKey{}).(rawMD) + if !ok { + return nil, false + } + + mds := make([]MD, 0, len(raw.added)+1) + mds = append(mds, raw.md) + for _, vv := range raw.added { + mds = append(mds, Pairs(vv...)) + } + return Join(mds...), ok +} + +type rawMD struct { + md MD + added [][]string +} diff --git a/vendor/google.golang.org/grpc/naming/dns_resolver.go b/vendor/google.golang.org/grpc/naming/dns_resolver.go new file mode 100644 index 00000000000..0f8a908ea9c --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/dns_resolver.go @@ -0,0 +1,290 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import ( + "errors" + "fmt" + "net" + "strconv" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" +) + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 +) + +var ( + errMissingAddr = errors.New("missing address") + errWatcherClose = errors.New("watcher has been closed") +) + +// NewDNSResolverWithFreq creates a DNS Resolver that can resolve DNS names, and +// create watchers that poll the DNS server using the frequency set by freq. 
+func NewDNSResolverWithFreq(freq time.Duration) (Resolver, error) { + return &dnsResolver{freq: freq}, nil +} + +// NewDNSResolver creates a DNS Resolver that can resolve DNS names, and create +// watchers that poll the DNS server using the default frequency defined by defaultFreq. +func NewDNSResolver() (Resolver, error) { + return NewDNSResolverWithFreq(defaultFreq) +} + +// dnsResolver handles name resolution for names following the DNS scheme +type dnsResolver struct { + // frequency of polling the DNS server that the watchers created by this resolver will use. + freq time.Duration +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. +func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. +// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err := net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err := net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v", target) +} + +// Resolve creates a watcher that watches the name resolution of the target. 
+func (r *dnsResolver) Resolve(target string) (Watcher, error) { + host, port, err := parseTarget(target) + if err != nil { + return nil, err + } + + if net.ParseIP(host) != nil { + ipWatcher := &ipWatcher{ + updateChan: make(chan *Update, 1), + } + host, _ = formatIP(host) + ipWatcher.updateChan <- &Update{Op: Add, Addr: host + ":" + port} + return ipWatcher, nil + } + + ctx, cancel := context.WithCancel(context.Background()) + return &dnsWatcher{ + r: r, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + t: time.NewTimer(0), + }, nil +} + +// dnsWatcher watches for the name resolution update for a specific target +type dnsWatcher struct { + r *dnsResolver + host string + port string + // The latest resolved address set + curAddrs map[string]*Update + ctx context.Context + cancel context.CancelFunc + t *time.Timer +} + +// ipWatcher watches for the name resolution update for an IP address. +type ipWatcher struct { + updateChan chan *Update +} + +// Next returns the address resolution Update for the target. For IP address, +// the resolution is itself, thus polling name server is unnecessary. Therefore, +// Next() will return an Update the first time it is called, and will be blocked +// for all following calls as no Update exists until watcher is closed. +func (i *ipWatcher) Next() ([]*Update, error) { + u, ok := <-i.updateChan + if !ok { + return nil, errWatcherClose + } + return []*Update{u}, nil +} + +// Close closes the ipWatcher. +func (i *ipWatcher) Close() { + close(i.updateChan) +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the server is a backend server. + Backend AddressType = iota + // GRPCLB indicates the server is a grpclb load balancer. + GRPCLB +) + +// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The +// name resolver used by the grpclb balancer is required to provide this type of metadata in +// its address updates. +type AddrMetadataGRPCLB struct { + // AddrType is the type of server (grpc load balancer or backend). + AddrType AddressType + // ServerName is the name of the grpc load balancer. Used for authentication. 
+ ServerName string +} + +// compileUpdate compares the old resolved addresses and newly resolved addresses, +// and generates an update list +func (w *dnsWatcher) compileUpdate(newAddrs map[string]*Update) []*Update { + var res []*Update + for a, u := range w.curAddrs { + if _, ok := newAddrs[a]; !ok { + u.Op = Delete + res = append(res, u) + } + } + for a, u := range newAddrs { + if _, ok := w.curAddrs[a]; !ok { + res = append(res, u) + } + } + return res +} + +func (w *dnsWatcher) lookupSRV() map[string]*Update { + newAddrs := make(map[string]*Update) + _, srvs, err := lookupSRV(w.ctx, "grpclb", "tcp", w.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(w.ctx, s.Target) + if err != nil { + grpclog.Warningf("grpc: failed load banlacer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs[addr] = &Update{Addr: addr, + Metadata: AddrMetadataGRPCLB{AddrType: GRPCLB, ServerName: s.Target}} + } + } + return newAddrs +} + +func (w *dnsWatcher) lookupHost() map[string]*Update { + newAddrs := make(map[string]*Update) + addrs, err := lookupHost(w.ctx, w.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + w.port + newAddrs[addr] = &Update{Addr: addr} + } + return newAddrs +} + +func (w *dnsWatcher) lookup() []*Update { + newAddrs := w.lookupSRV() + if newAddrs == nil { + // If failed to get any balancer address (either no corresponding SRV for the + // target, or caused by failure during resolution/parsing of the balancer target), + // return any A record info available. + newAddrs = w.lookupHost() + } + result := w.compileUpdate(newAddrs) + w.curAddrs = newAddrs + return result +} + +// Next returns the resolved address update(delta) for the target. If there's no +// change, it will sleep for 30 mins and try to resolve again after that. +func (w *dnsWatcher) Next() ([]*Update, error) { + for { + select { + case <-w.ctx.Done(): + return nil, errWatcherClose + case <-w.t.C: + } + result := w.lookup() + // Next lookup should happen after an interval defined by w.r.freq. + w.t.Reset(w.r.freq) + if len(result) > 0 { + return result, nil + } + } +} + +func (w *dnsWatcher) Close() { + w.cancel() +} diff --git a/vendor/google.golang.org/grpc/naming/go17.go b/vendor/google.golang.org/grpc/naming/go17.go new file mode 100644 index 00000000000..57b65d7b889 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go17.go @@ -0,0 +1,34 @@ +// +build go1.6,!go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package naming + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } +) diff --git a/vendor/google.golang.org/grpc/naming/go18.go b/vendor/google.golang.org/grpc/naming/go18.go new file mode 100644 index 00000000000..b5a0f842748 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/go18.go @@ -0,0 +1,28 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package naming + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV +) diff --git a/vendor/google.golang.org/grpc/naming/naming.go b/vendor/google.golang.org/grpc/naming/naming.go new file mode 100644 index 00000000000..8cc39e93758 --- /dev/null +++ b/vendor/google.golang.org/grpc/naming/naming.go @@ -0,0 +1,69 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package naming defines the naming API and related data structures for gRPC. +// The interface is EXPERIMENTAL and may be suject to change. +// +// Deprecated: please use package resolver. +package naming + +// Operation defines the corresponding operations for a name resolution change. +// +// Deprecated: please use package resolver. +type Operation uint8 + +const ( + // Add indicates a new address is added. + Add Operation = iota + // Delete indicates an existing address is deleted. + Delete +) + +// Update defines a name resolution update. Notice that it is not valid having both +// empty string Addr and nil Metadata in an Update. +// +// Deprecated: please use package resolver. +type Update struct { + // Op indicates the operation of the update. + Op Operation + // Addr is the updated address. It is empty string if there is no address update. + Addr string + // Metadata is the updated metadata. It is nil if there is no metadata update. + // Metadata is not required for a custom naming implementation. + Metadata interface{} +} + +// Resolver creates a Watcher for a target to track its resolution changes. +// +// Deprecated: please use package resolver. +type Resolver interface { + // Resolve creates a Watcher for target. 
+ Resolve(target string) (Watcher, error) +} + +// Watcher watches for the updates on the specified target. +// +// Deprecated: please use package resolver. +type Watcher interface { + // Next blocks until an update or error happens. It may return one or more + // updates. The first call should get the full set of the results. It should + // return an error if and only if Watcher cannot recover. + Next() ([]*Update, error) + // Close closes the Watcher. + Close() +} diff --git a/vendor/google.golang.org/grpc/peer/peer.go b/vendor/google.golang.org/grpc/peer/peer.go new file mode 100644 index 00000000000..317b8b9d09a --- /dev/null +++ b/vendor/google.golang.org/grpc/peer/peer.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package peer defines various peer information associated with RPCs and +// corresponding utils. +package peer + +import ( + "net" + + "golang.org/x/net/context" + "google.golang.org/grpc/credentials" +) + +// Peer contains the information of the peer for an RPC, such as the address +// and authentication information. +type Peer struct { + // Addr is the peer address. + Addr net.Addr + // AuthInfo is the authentication information of the transport. + // It is nil if there is no transport security being used. + AuthInfo credentials.AuthInfo +} + +type peerKey struct{} + +// NewContext creates a new context with peer information attached. +func NewContext(ctx context.Context, p *Peer) context.Context { + return context.WithValue(ctx, peerKey{}, p) +} + +// FromContext returns the peer information in ctx if it exists. +func FromContext(ctx context.Context) (p *Peer, ok bool) { + p, ok = ctx.Value(peerKey{}).(*Peer) + return +} diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go new file mode 100644 index 00000000000..0a984e6c8af --- /dev/null +++ b/vendor/google.golang.org/grpc/picker_wrapper.go @@ -0,0 +1,331 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +package grpc + +import ( + "io" + "sync" + "sync/atomic" + + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/resolver" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick +// actions and unblock when there's a picker update. +type pickerWrapper struct { + mu sync.Mutex + done bool + blockingCh chan struct{} + picker balancer.Picker + + // The latest connection happened. + connErrMu sync.Mutex + connErr error + + stickinessMDKey atomic.Value + stickiness *stickyStore +} + +func newPickerWrapper() *pickerWrapper { + bp := &pickerWrapper{ + blockingCh: make(chan struct{}), + stickiness: newStickyStore(), + } + return bp +} + +func (bp *pickerWrapper) updateConnectionError(err error) { + bp.connErrMu.Lock() + bp.connErr = err + bp.connErrMu.Unlock() +} + +func (bp *pickerWrapper) connectionError() error { + bp.connErrMu.Lock() + err := bp.connErr + bp.connErrMu.Unlock() + return err +} + +func (bp *pickerWrapper) updateStickinessMDKey(newKey string) { + // No need to check ok because mdKey == "" if ok == false. + if oldKey, _ := bp.stickinessMDKey.Load().(string); oldKey != newKey { + bp.stickinessMDKey.Store(newKey) + bp.stickiness.reset(newKey) + } +} + +func (bp *pickerWrapper) getStickinessMDKey() string { + // No need to check ok because mdKey == "" if ok == false. + mdKey, _ := bp.stickinessMDKey.Load().(string) + return mdKey +} + +func (bp *pickerWrapper) clearStickinessState() { + if oldKey := bp.getStickinessMDKey(); oldKey != "" { + // There's no need to reset store if mdKey was "". + bp.stickiness.reset(oldKey) + } +} + +// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick. +func (bp *pickerWrapper) updatePicker(p balancer.Picker) { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return + } + bp.picker = p + // bp.blockingCh should never be nil. + close(bp.blockingCh) + bp.blockingCh = make(chan struct{}) + bp.mu.Unlock() +} + +func doneChannelzWrapper(acw *acBalancerWrapper, done func(balancer.DoneInfo)) func(balancer.DoneInfo) { + acw.mu.Lock() + ac := acw.ac + acw.mu.Unlock() + ac.incrCallsStarted() + return func(b balancer.DoneInfo) { + if b.Err != nil && b.Err != io.EOF { + ac.incrCallsFailed() + } else { + ac.incrCallsSucceeded() + } + if done != nil { + done(b) + } + } +} + +// pick returns the transport that will be used for the RPC. +// It may block in the following cases: +// - there's no picker +// - the current picker returns ErrNoSubConnAvailable +// - the current picker returns other errors and failfast is false. +// - the subConn returned by the current picker is not READY +// When one of these situations happens, pick blocks until the picker gets updated. +func (bp *pickerWrapper) pick(ctx context.Context, failfast bool, opts balancer.PickOptions) (transport.ClientTransport, func(balancer.DoneInfo), error) { + + mdKey := bp.getStickinessMDKey() + stickyKey, isSticky := stickyKeyFromContext(ctx, mdKey) + + // Potential race here: if stickinessMDKey is updated after the above two + // lines, and this pick is a sticky pick, the following put could add an + // entry to sticky store with an outdated sticky key. + // + // The solution: keep the current md key in sticky store, and at the + // beginning of each get/put, check the mdkey against store.curMDKey. 
+ // - Cons: one more string comparing for each get/put. + // - Pros: the string matching happens inside get/put, so the overhead for + // non-sticky RPCs will be minimal. + + if isSticky { + if t, ok := bp.stickiness.get(mdKey, stickyKey); ok { + // Done function returned is always nil. + return t, nil, nil + } + } + + var ( + p balancer.Picker + ch chan struct{} + ) + + for { + bp.mu.Lock() + if bp.done { + bp.mu.Unlock() + return nil, nil, ErrClientConnClosing + } + + if bp.picker == nil { + ch = bp.blockingCh + } + if ch == bp.blockingCh { + // This could happen when either: + // - bp.picker is nil (the previous if condition), or + // - has called pick on the current picker. + bp.mu.Unlock() + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-ch: + } + continue + } + + ch = bp.blockingCh + p = bp.picker + bp.mu.Unlock() + + subConn, done, err := p.Pick(ctx, opts) + + if err != nil { + switch err { + case balancer.ErrNoSubConnAvailable: + continue + case balancer.ErrTransientFailure: + if !failfast { + continue + } + return nil, nil, status.Errorf(codes.Unavailable, "%v, latest connection error: %v", err, bp.connectionError()) + default: + // err is some other error. + return nil, nil, toRPCErr(err) + } + } + + acw, ok := subConn.(*acBalancerWrapper) + if !ok { + grpclog.Infof("subconn returned from pick is not *acBalancerWrapper") + continue + } + if t, ok := acw.getAddrConn().getReadyTransport(); ok { + if isSticky { + bp.stickiness.put(mdKey, stickyKey, acw) + } + if channelz.IsOn() { + return t, doneChannelzWrapper(acw, done), nil + } + return t, done, nil + } + grpclog.Infof("blockingPicker: the picked transport is not ready, loop back to repick") + // If ok == false, ac.state is not READY. + // A valid picker always returns READY subConn. This means the state of ac + // just changed, and picker will be updated shortly. + // continue back to the beginning of the for loop to repick. + } +} + +func (bp *pickerWrapper) close() { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.done { + return + } + bp.done = true + close(bp.blockingCh) +} + +type stickyStoreEntry struct { + acw *acBalancerWrapper + addr resolver.Address +} + +type stickyStore struct { + mu sync.Mutex + // curMDKey is check before every get/put to avoid races. The operation will + // abort immediately when the given mdKey is different from the curMDKey. + curMDKey string + store map[string]*stickyStoreEntry +} + +func newStickyStore() *stickyStore { + return &stickyStore{ + store: make(map[string]*stickyStoreEntry), + } +} + +// reset clears the map in stickyStore, and set the currentMDKey to newMDKey. +func (ss *stickyStore) reset(newMDKey string) { + ss.mu.Lock() + ss.curMDKey = newMDKey + ss.store = make(map[string]*stickyStoreEntry) + ss.mu.Unlock() +} + +// stickyKey is the key to look up in store. mdKey will be checked against +// curMDKey to avoid races. +func (ss *stickyStore) put(mdKey, stickyKey string, acw *acBalancerWrapper) { + ss.mu.Lock() + defer ss.mu.Unlock() + if mdKey != ss.curMDKey { + return + } + // TODO(stickiness): limit the total number of entries. + ss.store[stickyKey] = &stickyStoreEntry{ + acw: acw, + addr: acw.getAddrConn().getCurAddr(), + } +} + +// stickyKey is the key to look up in store. mdKey will be checked against +// curMDKey to avoid races. 
+func (ss *stickyStore) get(mdKey, stickyKey string) (transport.ClientTransport, bool) { + ss.mu.Lock() + defer ss.mu.Unlock() + if mdKey != ss.curMDKey { + return nil, false + } + entry, ok := ss.store[stickyKey] + if !ok { + return nil, false + } + ac := entry.acw.getAddrConn() + if ac.getCurAddr() != entry.addr { + delete(ss.store, stickyKey) + return nil, false + } + t, ok := ac.getReadyTransport() + if !ok { + delete(ss.store, stickyKey) + return nil, false + } + return t, true +} + +// Get one value from metadata in ctx with key stickinessMDKey. +// +// It returns "", false if stickinessMDKey is an empty string. +func stickyKeyFromContext(ctx context.Context, stickinessMDKey string) (string, bool) { + if stickinessMDKey == "" { + return "", false + } + + md, added, ok := metadata.FromOutgoingContextRaw(ctx) + if !ok { + return "", false + } + + if vv, ok := md[stickinessMDKey]; ok { + if len(vv) > 0 { + return vv[0], true + } + } + + for _, ss := range added { + for i := 0; i < len(ss)-1; i += 2 { + if ss[i] == stickinessMDKey { + return ss[i+1], true + } + } + } + + return "", false +} diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/pickfirst.go new file mode 100644 index 00000000000..bf659d49d2f --- /dev/null +++ b/vendor/google.golang.org/grpc/pickfirst.go @@ -0,0 +1,108 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/connectivity" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// PickFirstBalancerName is the name of the pick_first balancer. 
+const PickFirstBalancerName = "pick_first" + +func newPickfirstBuilder() balancer.Builder { + return &pickfirstBuilder{} +} + +type pickfirstBuilder struct{} + +func (*pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer { + return &pickfirstBalancer{cc: cc} +} + +func (*pickfirstBuilder) Name() string { + return PickFirstBalancerName +} + +type pickfirstBalancer struct { + cc balancer.ClientConn + sc balancer.SubConn +} + +func (b *pickfirstBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) { + if err != nil { + grpclog.Infof("pickfirstBalancer: HandleResolvedAddrs called with error %v", err) + return + } + if b.sc == nil { + b.sc, err = b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{}) + if err != nil { + grpclog.Errorf("pickfirstBalancer: failed to NewSubConn: %v", err) + return + } + b.cc.UpdateBalancerState(connectivity.Idle, &picker{sc: b.sc}) + b.sc.Connect() + } else { + b.sc.UpdateAddresses(addrs) + b.sc.Connect() + } +} + +func (b *pickfirstBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) { + grpclog.Infof("pickfirstBalancer: HandleSubConnStateChange: %p, %v", sc, s) + if b.sc != sc { + grpclog.Infof("pickfirstBalancer: ignored state change because sc is not recognized") + return + } + if s == connectivity.Shutdown { + b.sc = nil + return + } + + switch s { + case connectivity.Ready, connectivity.Idle: + b.cc.UpdateBalancerState(s, &picker{sc: sc}) + case connectivity.Connecting: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrNoSubConnAvailable}) + case connectivity.TransientFailure: + b.cc.UpdateBalancerState(s, &picker{err: balancer.ErrTransientFailure}) + } +} + +func (b *pickfirstBalancer) Close() { +} + +type picker struct { + err error + sc balancer.SubConn +} + +func (p *picker) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) { + if p.err != nil { + return nil, nil, p.err + } + return p.sc, nil, nil +} + +func init() { + balancer.Register(newPickfirstBuilder()) +} diff --git a/vendor/google.golang.org/grpc/proxy.go b/vendor/google.golang.org/grpc/proxy.go new file mode 100644 index 00000000000..2d40236e218 --- /dev/null +++ b/vendor/google.golang.org/grpc/proxy.go @@ -0,0 +1,130 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bufio" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httputil" + "net/url" + + "golang.org/x/net/context" +) + +var ( + // errDisabled indicates that proxy is disabled for the address. + errDisabled = errors.New("proxy is disabled for the address") + // The following variable will be overwritten in the tests. 
+ httpProxyFromEnvironment = http.ProxyFromEnvironment +) + +func mapAddress(ctx context.Context, address string) (string, error) { + req := &http.Request{ + URL: &url.URL{ + Scheme: "https", + Host: address, + }, + } + url, err := httpProxyFromEnvironment(req) + if err != nil { + return "", err + } + if url == nil { + return "", errDisabled + } + return url.Host, nil +} + +// To read a response from a net.Conn, http.ReadResponse() takes a bufio.Reader. +// It's possible that this reader reads more than what's need for the response and stores +// those bytes in the buffer. +// bufConn wraps the original net.Conn and the bufio.Reader to make sure we don't lose the +// bytes in the buffer. +type bufConn struct { + net.Conn + r io.Reader +} + +func (c *bufConn) Read(b []byte) (int, error) { + return c.r.Read(b) +} + +func doHTTPConnectHandshake(ctx context.Context, conn net.Conn, addr string) (_ net.Conn, err error) { + defer func() { + if err != nil { + conn.Close() + } + }() + + req := (&http.Request{ + Method: http.MethodConnect, + URL: &url.URL{Host: addr}, + Header: map[string][]string{"User-Agent": {grpcUA}}, + }) + + if err := sendHTTPRequest(ctx, req, conn); err != nil { + return nil, fmt.Errorf("failed to write the HTTP request: %v", err) + } + + r := bufio.NewReader(conn) + resp, err := http.ReadResponse(r, req) + if err != nil { + return nil, fmt.Errorf("reading server HTTP response: %v", err) + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + dump, err := httputil.DumpResponse(resp, true) + if err != nil { + return nil, fmt.Errorf("failed to do connect handshake, status code: %s", resp.Status) + } + return nil, fmt.Errorf("failed to do connect handshake, response: %q", dump) + } + + return &bufConn{Conn: conn, r: r}, nil +} + +// newProxyDialer returns a dialer that connects to proxy first if necessary. +// The returned dialer checks if a proxy is necessary, dial to the proxy with the +// provided dialer, does HTTP CONNECT handshake and returns the connection. +func newProxyDialer(dialer func(context.Context, string) (net.Conn, error)) func(context.Context, string) (net.Conn, error) { + return func(ctx context.Context, addr string) (conn net.Conn, err error) { + var skipHandshake bool + newAddr, err := mapAddress(ctx, addr) + if err != nil { + if err != errDisabled { + return nil, err + } + skipHandshake = true + newAddr = addr + } + + conn, err = dialer(ctx, newAddr) + if err != nil { + return + } + if !skipHandshake { + conn, err = doHTTPConnectHandshake(ctx, conn, addr) + } + return + } +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go new file mode 100644 index 00000000000..c1cabfc995f --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/dns_resolver.go @@ -0,0 +1,379 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +// Package dns implements a dns resolver to be installed as the default resolver +// in grpc. +package dns + +import ( + "encoding/json" + "errors" + "fmt" + "math/rand" + "net" + "os" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +func init() { + resolver.Register(NewBuilder()) +} + +const ( + defaultPort = "443" + defaultFreq = time.Minute * 30 + golang = "GO" + // In DNS, service config is encoded in a TXT record via the mechanism + // described in RFC-1464 using the attribute name grpc_config. + txtAttribute = "grpc_config=" +) + +var ( + errMissingAddr = errors.New("missing address") + randomGen = rand.New(rand.NewSource(time.Now().UnixNano())) +) + +// NewBuilder creates a dnsBuilder which is used to factory DNS resolvers. +func NewBuilder() resolver.Builder { + return &dnsBuilder{freq: defaultFreq} +} + +type dnsBuilder struct { + // frequency of polling the DNS server. + freq time.Duration +} + +// Build creates and starts a DNS resolver that watches the name resolution of the target. +func (b *dnsBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + host, port, err := parseTarget(target.Endpoint) + if err != nil { + return nil, err + } + + // IP address. + if net.ParseIP(host) != nil { + host, _ = formatIP(host) + addr := []resolver.Address{{Addr: host + ":" + port}} + i := &ipResolver{ + cc: cc, + ip: addr, + rn: make(chan struct{}, 1), + q: make(chan struct{}), + } + cc.NewAddress(addr) + go i.watcher() + return i, nil + } + + // DNS address (non-IP). + ctx, cancel := context.WithCancel(context.Background()) + d := &dnsResolver{ + freq: b.freq, + host: host, + port: port, + ctx: ctx, + cancel: cancel, + cc: cc, + t: time.NewTimer(0), + rn: make(chan struct{}, 1), + disableServiceConfig: opts.DisableServiceConfig, + } + + d.wg.Add(1) + go d.watcher() + return d, nil +} + +// Scheme returns the naming scheme of this resolver builder, which is "dns". +func (b *dnsBuilder) Scheme() string { + return "dns" +} + +// ipResolver watches for the name resolution update for an IP address. +type ipResolver struct { + cc resolver.ClientConn + ip []resolver.Address + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + q chan struct{} +} + +// ResolveNow resend the address it stores, no resolution is needed. +func (i *ipResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case i.rn <- struct{}{}: + default: + } +} + +// Close closes the ipResolver. +func (i *ipResolver) Close() { + close(i.q) +} + +func (i *ipResolver) watcher() { + for { + select { + case <-i.rn: + i.cc.NewAddress(i.ip) + case <-i.q: + return + } + } +} + +// dnsResolver watches for the name resolution update for a non-IP target. +type dnsResolver struct { + freq time.Duration + host string + port string + ctx context.Context + cancel context.CancelFunc + cc resolver.ClientConn + // rn channel is used by ResolveNow() to force an immediate resolution of the target. + rn chan struct{} + t *time.Timer + // wg is used to enforce Close() to return after the watcher() goroutine has finished. + // Otherwise, data race will be possible. [Race Example] in dns_resolver_test we + // replace the real lookup functions with mocked ones to facilitate testing. 
+ // If Close() doesn't wait for watcher() goroutine finishes, race detector sometimes + // will warns lookup (READ the lookup function pointers) inside watcher() goroutine + // has data race with replaceNetFunc (WRITE the lookup function pointers). + wg sync.WaitGroup + disableServiceConfig bool +} + +// ResolveNow invoke an immediate resolution of the target that this dnsResolver watches. +func (d *dnsResolver) ResolveNow(opt resolver.ResolveNowOption) { + select { + case d.rn <- struct{}{}: + default: + } +} + +// Close closes the dnsResolver. +func (d *dnsResolver) Close() { + d.cancel() + d.wg.Wait() + d.t.Stop() +} + +func (d *dnsResolver) watcher() { + defer d.wg.Done() + for { + select { + case <-d.ctx.Done(): + return + case <-d.t.C: + case <-d.rn: + } + result, sc := d.lookup() + // Next lookup should happen after an interval defined by d.freq. + d.t.Reset(d.freq) + d.cc.NewServiceConfig(sc) + d.cc.NewAddress(result) + } +} + +func (d *dnsResolver) lookupSRV() []resolver.Address { + var newAddrs []resolver.Address + _, srvs, err := lookupSRV(d.ctx, "grpclb", "tcp", d.host) + if err != nil { + grpclog.Infof("grpc: failed dns SRV record lookup due to %v.\n", err) + return nil + } + for _, s := range srvs { + lbAddrs, err := lookupHost(d.ctx, s.Target) + if err != nil { + grpclog.Infof("grpc: failed load balancer address dns lookup due to %v.\n", err) + continue + } + for _, a := range lbAddrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + strconv.Itoa(int(s.Port)) + newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target}) + } + } + return newAddrs +} + +func (d *dnsResolver) lookupTXT() string { + ss, err := lookupTXT(d.ctx, d.host) + if err != nil { + grpclog.Infof("grpc: failed dns TXT record lookup due to %v.\n", err) + return "" + } + var res string + for _, s := range ss { + res += s + } + + // TXT record must have "grpc_config=" attribute in order to be used as service config. + if !strings.HasPrefix(res, txtAttribute) { + grpclog.Warningf("grpc: TXT record %v missing %v attribute", res, txtAttribute) + return "" + } + return strings.TrimPrefix(res, txtAttribute) +} + +func (d *dnsResolver) lookupHost() []resolver.Address { + var newAddrs []resolver.Address + addrs, err := lookupHost(d.ctx, d.host) + if err != nil { + grpclog.Warningf("grpc: failed dns A record lookup due to %v.\n", err) + return nil + } + for _, a := range addrs { + a, ok := formatIP(a) + if !ok { + grpclog.Errorf("grpc: failed IP parsing due to %v.\n", err) + continue + } + addr := a + ":" + d.port + newAddrs = append(newAddrs, resolver.Address{Addr: addr}) + } + return newAddrs +} + +func (d *dnsResolver) lookup() ([]resolver.Address, string) { + newAddrs := d.lookupSRV() + // Support fallback to non-balancer address. + newAddrs = append(newAddrs, d.lookupHost()...) + if d.disableServiceConfig { + return newAddrs, "" + } + sc := d.lookupTXT() + return newAddrs, canaryingSC(sc) +} + +// formatIP returns ok = false if addr is not a valid textual representation of an IP address. +// If addr is an IPv4 address, return the addr and ok = true. +// If addr is an IPv6 address, return the addr enclosed in square brackets and ok = true. 
+func formatIP(addr string) (addrIP string, ok bool) { + ip := net.ParseIP(addr) + if ip == nil { + return "", false + } + if ip.To4() != nil { + return addr, true + } + return "[" + addr + "]", true +} + +// parseTarget takes the user input target string, returns formatted host and port info. +// If target doesn't specify a port, set the port to be the defaultPort. +// If target is in IPv6 format and host-name is enclosed in sqarue brackets, brackets +// are strippd when setting the host. +// examples: +// target: "www.google.com" returns host: "www.google.com", port: "443" +// target: "ipv4-host:80" returns host: "ipv4-host", port: "80" +// target: "[ipv6-host]" returns host: "ipv6-host", port: "443" +// target: ":80" returns host: "localhost", port: "80" +// target: ":" returns host: "localhost", port: "443" +func parseTarget(target string) (host, port string, err error) { + if target == "" { + return "", "", errMissingAddr + } + if ip := net.ParseIP(target); ip != nil { + // target is an IPv4 or IPv6(without brackets) address + return target, defaultPort, nil + } + if host, port, err = net.SplitHostPort(target); err == nil { + // target has port, i.e ipv4-host:port, [ipv6-host]:port, host-name:port + if host == "" { + // Keep consistent with net.Dial(): If the host is empty, as in ":80", the local system is assumed. + host = "localhost" + } + if port == "" { + // If the port field is empty(target ends with colon), e.g. "[::1]:", defaultPort is used. + port = defaultPort + } + return host, port, nil + } + if host, port, err = net.SplitHostPort(target + ":" + defaultPort); err == nil { + // target doesn't have port + return host, port, nil + } + return "", "", fmt.Errorf("invalid target address %v, error info: %v", target, err) +} + +type rawChoice struct { + ClientLanguage *[]string `json:"clientLanguage,omitempty"` + Percentage *int `json:"percentage,omitempty"` + ClientHostName *[]string `json:"clientHostName,omitempty"` + ServiceConfig *json.RawMessage `json:"serviceConfig,omitempty"` +} + +func containsString(a *[]string, b string) bool { + if a == nil { + return true + } + for _, c := range *a { + if c == b { + return true + } + } + return false +} + +func chosenByPercentage(a *int) bool { + if a == nil { + return true + } + return randomGen.Intn(100)+1 <= *a +} + +func canaryingSC(js string) string { + if js == "" { + return "" + } + var rcs []rawChoice + err := json.Unmarshal([]byte(js), &rcs) + if err != nil { + grpclog.Warningf("grpc: failed to parse service config json string due to %v.\n", err) + return "" + } + cliHostname, err := os.Hostname() + if err != nil { + grpclog.Warningf("grpc: failed to get client hostname due to %v.\n", err) + return "" + } + var sc string + for _, c := range rcs { + if !containsString(c.ClientLanguage, golang) || + !chosenByPercentage(c.Percentage) || + !containsString(c.ClientHostName, cliHostname) || + c.ServiceConfig == nil { + continue + } + sc = string(*c.ServiceConfig) + break + } + return sc +} diff --git a/vendor/google.golang.org/grpc/resolver/dns/go17.go b/vendor/google.golang.org/grpc/resolver/dns/go17.go new file mode 100644 index 00000000000..b466bc8f6d4 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go17.go @@ -0,0 +1,35 @@ +// +build go1.6, !go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import ( + "net" + + "golang.org/x/net/context" +) + +var ( + lookupHost = func(ctx context.Context, host string) ([]string, error) { return net.LookupHost(host) } + lookupSRV = func(ctx context.Context, service, proto, name string) (string, []*net.SRV, error) { + return net.LookupSRV(service, proto, name) + } + lookupTXT = func(ctx context.Context, name string) ([]string, error) { return net.LookupTXT(name) } +) diff --git a/vendor/google.golang.org/grpc/resolver/dns/go18.go b/vendor/google.golang.org/grpc/resolver/dns/go18.go new file mode 100644 index 00000000000..fa34f14cad4 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/dns/go18.go @@ -0,0 +1,29 @@ +// +build go1.8 + +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package dns + +import "net" + +var ( + lookupHost = net.DefaultResolver.LookupHost + lookupSRV = net.DefaultResolver.LookupSRV + lookupTXT = net.DefaultResolver.LookupTXT +) diff --git a/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go new file mode 100644 index 00000000000..b76010d74d1 --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/passthrough/passthrough.go @@ -0,0 +1,57 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package passthrough implements a pass-through resolver. It sends the target +// name without scheme back to gRPC as resolved address. 
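// A minimal usage sketch (illustrative, not part of the upstream file): dialing with an
// explicit "passthrough" scheme hands the endpoint to the ClientConn unmodified, e.g.
//
//    conn, err := grpc.Dial("passthrough:///10.0.0.1:50051", grpc.WithInsecure())
//
// The resolver itself performs no lookup; it reports "10.0.0.1:50051" as the only address.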
+package passthrough + +import "google.golang.org/grpc/resolver" + +const scheme = "passthrough" + +type passthroughBuilder struct{} + +func (*passthroughBuilder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) { + r := &passthroughResolver{ + target: target, + cc: cc, + } + r.start() + return r, nil +} + +func (*passthroughBuilder) Scheme() string { + return scheme +} + +type passthroughResolver struct { + target resolver.Target + cc resolver.ClientConn +} + +func (r *passthroughResolver) start() { + r.cc.NewAddress([]resolver.Address{{Addr: r.target.Endpoint}}) +} + +func (*passthroughResolver) ResolveNow(o resolver.ResolveNowOption) {} + +func (*passthroughResolver) Close() {} + +func init() { + resolver.Register(&passthroughBuilder{}) +} diff --git a/vendor/google.golang.org/grpc/resolver/resolver.go b/vendor/google.golang.org/grpc/resolver/resolver.go new file mode 100644 index 00000000000..506afac88ae --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver/resolver.go @@ -0,0 +1,154 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package resolver defines APIs for name resolution in gRPC. +// All APIs in this package are experimental. +package resolver + +var ( + // m is a map from scheme to resolver builder. + m = make(map[string]Builder) + // defaultScheme is the default scheme to use. + defaultScheme = "passthrough" +) + +// TODO(bar) install dns resolver in init(){}. + +// Register registers the resolver builder to the resolver map. b.Scheme will be +// used as the scheme registered with this builder. +// +// NOTE: this function must only be called during initialization time (i.e. in +// an init() function), and is not thread-safe. If multiple Resolvers are +// registered with the same name, the one registered last will take effect. +func Register(b Builder) { + m[b.Scheme()] = b +} + +// Get returns the resolver builder registered with the given scheme. +// +// If no builder is register with the scheme, nil will be returned. +func Get(scheme string) Builder { + if b, ok := m[scheme]; ok { + return b + } + return nil +} + +// SetDefaultScheme sets the default scheme that will be used. +// The default default scheme is "passthrough". +func SetDefaultScheme(scheme string) { + defaultScheme = scheme +} + +// GetDefaultScheme gets the default scheme that will be used. +func GetDefaultScheme() string { + return defaultScheme +} + +// AddressType indicates the address type returned by name resolution. +type AddressType uint8 + +const ( + // Backend indicates the address is for a backend server. + Backend AddressType = iota + // GRPCLB indicates the address is for a grpclb load balancer. + GRPCLB +) + +// Address represents a server the client connects to. +// This is the EXPERIMENTAL API and may be changed or extended in the future. +type Address struct { + // Addr is the server address on which a connection will be established. 
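// For example, "10.0.0.1:443" or "[2001:db8::1]:443" (IPv6 hosts are bracketed).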
+ Addr string + // Type is the type of this address. + Type AddressType + // ServerName is the name of this address. + // + // e.g. if Type is GRPCLB, ServerName should be the name of the remote load + // balancer, not the name of the backend. + ServerName string + // Metadata is the information associated with Addr, which may be used + // to make load balancing decision. + Metadata interface{} +} + +// BuildOption includes additional information for the builder to create +// the resolver. +type BuildOption struct { + // DisableServiceConfig indicates whether resolver should fetch service config data. + DisableServiceConfig bool +} + +// ClientConn contains the callbacks for resolver to notify any updates +// to the gRPC ClientConn. +// +// This interface is to be implemented by gRPC. Users should not need a +// brand new implementation of this interface. For the situations like +// testing, the new implementation should embed this interface. This allows +// gRPC to add new methods to this interface. +type ClientConn interface { + // NewAddress is called by resolver to notify ClientConn a new list + // of resolved addresses. + // The address list should be the complete list of resolved addresses. + NewAddress(addresses []Address) + // NewServiceConfig is called by resolver to notify ClientConn a new + // service config. The service config should be provided as a json string. + NewServiceConfig(serviceConfig string) +} + +// Target represents a target for gRPC, as specified in: +// https://github.com/grpc/grpc/blob/master/doc/naming.md. +type Target struct { + Scheme string + Authority string + Endpoint string +} + +// Builder creates a resolver that will be used to watch name resolution updates. +type Builder interface { + // Build creates a new resolver for the given target. + // + // gRPC dial calls Build synchronously, and fails if the returned error is + // not nil. + Build(target Target, cc ClientConn, opts BuildOption) (Resolver, error) + // Scheme returns the scheme supported by this resolver. + // Scheme is defined at https://github.com/grpc/grpc/blob/master/doc/naming.md. + Scheme() string +} + +// ResolveNowOption includes additional information for ResolveNow. +type ResolveNowOption struct{} + +// Resolver watches for the updates on the specified target. +// Updates include address updates and service config updates. +type Resolver interface { + // ResolveNow will be called by gRPC to try to resolve the target name + // again. It's just a hint, resolver can ignore this if it's not necessary. + // + // It could be called multiple times concurrently. + ResolveNow(ResolveNowOption) + // Close closes the resolver. + Close() +} + +// UnregisterForTesting removes the resolver builder with the given scheme from the +// resolver map. +// This function is for testing only. +func UnregisterForTesting(scheme string) { + delete(m, scheme) +} diff --git a/vendor/google.golang.org/grpc/resolver_conn_wrapper.go b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go new file mode 100644 index 00000000000..1b493db2e6c --- /dev/null +++ b/vendor/google.golang.org/grpc/resolver_conn_wrapper.go @@ -0,0 +1,158 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "fmt" + "strings" + + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/resolver" +) + +// ccResolverWrapper is a wrapper on top of cc for resolvers. +// It implements resolver.ClientConnection interface. +type ccResolverWrapper struct { + cc *ClientConn + resolver resolver.Resolver + addrCh chan []resolver.Address + scCh chan string + done chan struct{} +} + +// split2 returns the values from strings.SplitN(s, sep, 2). +// If sep is not found, it returns ("", s, false) instead. +func split2(s, sep string) (string, string, bool) { + spl := strings.SplitN(s, sep, 2) + if len(spl) < 2 { + return "", "", false + } + return spl[0], spl[1], true +} + +// parseTarget splits target into a struct containing scheme, authority and +// endpoint. +// +// If target is not a valid scheme://authority/endpoint, it returns {Endpoint: +// target}. +func parseTarget(target string) (ret resolver.Target) { + var ok bool + ret.Scheme, ret.Endpoint, ok = split2(target, "://") + if !ok { + return resolver.Target{Endpoint: target} + } + ret.Authority, ret.Endpoint, ok = split2(ret.Endpoint, "/") + if !ok { + return resolver.Target{Endpoint: target} + } + return ret +} + +// newCCResolverWrapper parses cc.target for scheme and gets the resolver +// builder for this scheme. It then builds the resolver and starts the +// monitoring goroutine for it. +// +// If withResolverBuilder dial option is set, the specified resolver will be +// used instead. +func newCCResolverWrapper(cc *ClientConn) (*ccResolverWrapper, error) { + rb := cc.dopts.resolverBuilder + if rb == nil { + return nil, fmt.Errorf("could not get resolver for scheme: %q", cc.parsedTarget.Scheme) + } + + ccr := &ccResolverWrapper{ + cc: cc, + addrCh: make(chan []resolver.Address, 1), + scCh: make(chan string, 1), + done: make(chan struct{}), + } + + var err error + ccr.resolver, err = rb.Build(cc.parsedTarget, ccr, resolver.BuildOption{DisableServiceConfig: cc.dopts.disableServiceConfig}) + if err != nil { + return nil, err + } + return ccr, nil +} + +func (ccr *ccResolverWrapper) start() { + go ccr.watcher() +} + +// watcher processes address updates and service config updates sequentially. +// Otherwise, we need to resolve possible races between address and service +// config (e.g. they specify different balancer types). 
+func (ccr *ccResolverWrapper) watcher() { + for { + select { + case <-ccr.done: + return + default: + } + + select { + case addrs := <-ccr.addrCh: + select { + case <-ccr.done: + return + default: + } + grpclog.Infof("ccResolverWrapper: sending new addresses to cc: %v", addrs) + ccr.cc.handleResolvedAddrs(addrs, nil) + case sc := <-ccr.scCh: + select { + case <-ccr.done: + return + default: + } + grpclog.Infof("ccResolverWrapper: got new service config: %v", sc) + ccr.cc.handleServiceConfig(sc) + case <-ccr.done: + return + } + } +} + +func (ccr *ccResolverWrapper) resolveNow(o resolver.ResolveNowOption) { + ccr.resolver.ResolveNow(o) +} + +func (ccr *ccResolverWrapper) close() { + ccr.resolver.Close() + close(ccr.done) +} + +// NewAddress is called by the resolver implemenetion to send addresses to gRPC. +func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) { + select { + case <-ccr.addrCh: + default: + } + ccr.addrCh <- addrs +} + +// NewServiceConfig is called by the resolver implemenetion to send service +// configs to gPRC. +func (ccr *ccResolverWrapper) NewServiceConfig(sc string) { + select { + case <-ccr.scCh: + default: + } + ccr.scCh <- sc +} diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go new file mode 100644 index 00000000000..5de1b031ec2 --- /dev/null +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -0,0 +1,727 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "compress/gzip" + "encoding/binary" + "fmt" + "io" + "io/ioutil" + "math" + "net/url" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// Compressor defines the interface gRPC uses to compress a message. +// +// Deprecated: use package encoding. +type Compressor interface { + // Do compresses p into w. + Do(w io.Writer, p []byte) error + // Type returns the compression algorithm the Compressor uses. + Type() string +} + +type gzipCompressor struct { + pool sync.Pool +} + +// NewGZIPCompressor creates a Compressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPCompressor() Compressor { + c, _ := NewGZIPCompressorWithLevel(gzip.DefaultCompression) + return c +} + +// NewGZIPCompressorWithLevel is like NewGZIPCompressor but specifies the gzip compression level instead +// of assuming DefaultCompression. +// +// The error returned will be nil if the level is valid. +// +// Deprecated: use package encoding/gzip. 
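// A hedged usage sketch (target is a placeholder; WithCompressor is the legacy
// client-side dial option that pairs with this Compressor):
//
//    cp, err := grpc.NewGZIPCompressorWithLevel(gzip.BestSpeed)
//    if err != nil {
//        // handle the invalid-level error
//    }
//    conn, err := grpc.Dial(target, grpc.WithCompressor(cp))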
+func NewGZIPCompressorWithLevel(level int) (Compressor, error) { + if level < gzip.DefaultCompression || level > gzip.BestCompression { + return nil, fmt.Errorf("grpc: invalid compression level: %d", level) + } + return &gzipCompressor{ + pool: sync.Pool{ + New: func() interface{} { + w, err := gzip.NewWriterLevel(ioutil.Discard, level) + if err != nil { + panic(err) + } + return w + }, + }, + }, nil +} + +func (c *gzipCompressor) Do(w io.Writer, p []byte) error { + z := c.pool.Get().(*gzip.Writer) + defer c.pool.Put(z) + z.Reset(w) + if _, err := z.Write(p); err != nil { + return err + } + return z.Close() +} + +func (c *gzipCompressor) Type() string { + return "gzip" +} + +// Decompressor defines the interface gRPC uses to decompress a message. +// +// Deprecated: use package encoding. +type Decompressor interface { + // Do reads the data from r and uncompress them. + Do(r io.Reader) ([]byte, error) + // Type returns the compression algorithm the Decompressor uses. + Type() string +} + +type gzipDecompressor struct { + pool sync.Pool +} + +// NewGZIPDecompressor creates a Decompressor based on GZIP. +// +// Deprecated: use package encoding/gzip. +func NewGZIPDecompressor() Decompressor { + return &gzipDecompressor{} +} + +func (d *gzipDecompressor) Do(r io.Reader) ([]byte, error) { + var z *gzip.Reader + switch maybeZ := d.pool.Get().(type) { + case nil: + newZ, err := gzip.NewReader(r) + if err != nil { + return nil, err + } + z = newZ + case *gzip.Reader: + z = maybeZ + if err := z.Reset(r); err != nil { + d.pool.Put(z) + return nil, err + } + } + + defer func() { + z.Close() + d.pool.Put(z) + }() + return ioutil.ReadAll(z) +} + +func (d *gzipDecompressor) Type() string { + return "gzip" +} + +// callInfo contains all related configuration and information about an RPC. +type callInfo struct { + compressorType string + failFast bool + stream *clientStream + traceInfo traceInfo // in trace.go + maxReceiveMessageSize *int + maxSendMessageSize *int + creds credentials.PerRPCCredentials + contentSubtype string + codec baseCodec +} + +func defaultCallInfo() *callInfo { + return &callInfo{failFast: true} +} + +// CallOption configures a Call before it starts or extracts information from +// a Call after it completes. +type CallOption interface { + // before is called before the call is sent to any server. If before + // returns a non-nil error, the RPC fails with that error. + before(*callInfo) error + + // after is called after the call has completed. after cannot return an + // error, so any failures should be reported via output parameters. + after(*callInfo) +} + +// EmptyCallOption does not alter the Call configuration. +// It can be embedded in another structure to carry satellite data for use +// by interceptors. +type EmptyCallOption struct{} + +func (EmptyCallOption) before(*callInfo) error { return nil } +func (EmptyCallOption) after(*callInfo) {} + +// Header returns a CallOptions that retrieves the header metadata +// for a unary RPC. +func Header(md *metadata.MD) CallOption { + return HeaderCallOption{HeaderAddr: md} +} + +// HeaderCallOption is a CallOption for collecting response header metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. 
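// An illustrative sketch (client, ctx and req are assumed to exist):
//
//    var hdr metadata.MD
//    resp, err := client.SomeMethod(ctx, req, grpc.Header(&hdr))
//    // hdr holds the response header metadata once the RPC has completed.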
+type HeaderCallOption struct { + HeaderAddr *metadata.MD +} + +func (o HeaderCallOption) before(c *callInfo) error { return nil } +func (o HeaderCallOption) after(c *callInfo) { + if c.stream != nil { + *o.HeaderAddr, _ = c.stream.Header() + } +} + +// Trailer returns a CallOptions that retrieves the trailer metadata +// for a unary RPC. +func Trailer(md *metadata.MD) CallOption { + return TrailerCallOption{TrailerAddr: md} +} + +// TrailerCallOption is a CallOption for collecting response trailer metadata. +// The metadata field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type TrailerCallOption struct { + TrailerAddr *metadata.MD +} + +func (o TrailerCallOption) before(c *callInfo) error { return nil } +func (o TrailerCallOption) after(c *callInfo) { + if c.stream != nil { + *o.TrailerAddr = c.stream.Trailer() + } +} + +// Peer returns a CallOption that retrieves peer information for a unary RPC. +// The peer field will be populated *after* the RPC completes. +func Peer(p *peer.Peer) CallOption { + return PeerCallOption{PeerAddr: p} +} + +// PeerCallOption is a CallOption for collecting the identity of the remote +// peer. The peer field will be populated *after* the RPC completes. +// This is an EXPERIMENTAL API. +type PeerCallOption struct { + PeerAddr *peer.Peer +} + +func (o PeerCallOption) before(c *callInfo) error { return nil } +func (o PeerCallOption) after(c *callInfo) { + if c.stream != nil { + if x, ok := peer.FromContext(c.stream.Context()); ok { + *o.PeerAddr = *x + } + } +} + +// FailFast configures the action to take when an RPC is attempted on broken +// connections or unreachable servers. If failFast is true, the RPC will fail +// immediately. Otherwise, the RPC client will block the call until a +// connection is available (or the call is canceled or times out) and will +// retry the call if it fails due to a transient error. gRPC will not retry if +// data was written to the wire unless the server indicates it did not process +// the data. Please refer to +// https://github.com/grpc/grpc/blob/master/doc/wait-for-ready.md. +// +// By default, RPCs are "Fail Fast". +func FailFast(failFast bool) CallOption { + return FailFastCallOption{FailFast: failFast} +} + +// FailFastCallOption is a CallOption for indicating whether an RPC should fail +// fast or not. +// This is an EXPERIMENTAL API. +type FailFastCallOption struct { + FailFast bool +} + +func (o FailFastCallOption) before(c *callInfo) error { + c.failFast = o.FailFast + return nil +} +func (o FailFastCallOption) after(c *callInfo) {} + +// MaxCallRecvMsgSize returns a CallOption which sets the maximum message size the client can receive. +func MaxCallRecvMsgSize(s int) CallOption { + return MaxRecvMsgSizeCallOption{MaxRecvMsgSize: s} +} + +// MaxRecvMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can receive. +// This is an EXPERIMENTAL API. +type MaxRecvMsgSizeCallOption struct { + MaxRecvMsgSize int +} + +func (o MaxRecvMsgSizeCallOption) before(c *callInfo) error { + c.maxReceiveMessageSize = &o.MaxRecvMsgSize + return nil +} +func (o MaxRecvMsgSizeCallOption) after(c *callInfo) {} + +// MaxCallSendMsgSize returns a CallOption which sets the maximum message size the client can send. +func MaxCallSendMsgSize(s int) CallOption { + return MaxSendMsgSizeCallOption{MaxSendMsgSize: s} +} + +// MaxSendMsgSizeCallOption is a CallOption that indicates the maximum message +// size the client can send. +// This is an EXPERIMENTAL API. 
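// An illustrative sketch: raise the per-call send limit to 16 MiB
// (client, ctx and req are assumed to exist):
//
//    resp, err := client.SomeMethod(ctx, req, grpc.MaxCallSendMsgSize(16*1024*1024))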
+type MaxSendMsgSizeCallOption struct { + MaxSendMsgSize int +} + +func (o MaxSendMsgSizeCallOption) before(c *callInfo) error { + c.maxSendMessageSize = &o.MaxSendMsgSize + return nil +} +func (o MaxSendMsgSizeCallOption) after(c *callInfo) {} + +// PerRPCCredentials returns a CallOption that sets credentials.PerRPCCredentials +// for a call. +func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption { + return PerRPCCredsCallOption{Creds: creds} +} + +// PerRPCCredsCallOption is a CallOption that indicates the per-RPC +// credentials to use for the call. +// This is an EXPERIMENTAL API. +type PerRPCCredsCallOption struct { + Creds credentials.PerRPCCredentials +} + +func (o PerRPCCredsCallOption) before(c *callInfo) error { + c.creds = o.Creds + return nil +} +func (o PerRPCCredsCallOption) after(c *callInfo) {} + +// UseCompressor returns a CallOption which sets the compressor used when +// sending the request. If WithCompressor is also set, UseCompressor has +// higher priority. +// +// This API is EXPERIMENTAL. +func UseCompressor(name string) CallOption { + return CompressorCallOption{CompressorType: name} +} + +// CompressorCallOption is a CallOption that indicates the compressor to use. +// This is an EXPERIMENTAL API. +type CompressorCallOption struct { + CompressorType string +} + +func (o CompressorCallOption) before(c *callInfo) error { + c.compressorType = o.CompressorType + return nil +} +func (o CompressorCallOption) after(c *callInfo) {} + +// CallContentSubtype returns a CallOption that will set the content-subtype +// for a call. For example, if content-subtype is "json", the Content-Type over +// the wire will be "application/grpc+json". The content-subtype is converted +// to lowercase before being included in Content-Type. See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If CallCustomCodec is not also used, the content-subtype will be used to +// look up the Codec to use in the registry controlled by RegisterCodec. See +// the documentation on RegisterCodec for details on registration. The lookup +// of content-subtype is case-insensitive. If no such Codec is found, the call +// will result in an error with code codes.Internal. +// +// If CallCustomCodec is also used, that Codec will be used for all request and +// response messages, with the content-subtype set to the given contentSubtype +// here for requests. +func CallContentSubtype(contentSubtype string) CallOption { + return ContentSubtypeCallOption{ContentSubtype: strings.ToLower(contentSubtype)} +} + +// ContentSubtypeCallOption is a CallOption that indicates the content-subtype +// used for marshaling messages. +// This is an EXPERIMENTAL API. +type ContentSubtypeCallOption struct { + ContentSubtype string +} + +func (o ContentSubtypeCallOption) before(c *callInfo) error { + c.contentSubtype = o.ContentSubtype + return nil +} +func (o ContentSubtypeCallOption) after(c *callInfo) {} + +// CallCustomCodec returns a CallOption that will set the given Codec to be +// used for all request and response messages for a call. The result of calling +// String() will be used as the content-subtype in a case-insensitive manner. +// +// See Content-Type on +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. Also see the documentation on RegisterCodec and +// CallContentSubtype for more details on the interaction between Codec and +// content-subtype. 
+// +// This function is provided for advanced users; prefer to use only +// CallContentSubtype to select a registered codec instead. +func CallCustomCodec(codec Codec) CallOption { + return CustomCodecCallOption{Codec: codec} +} + +// CustomCodecCallOption is a CallOption that indicates the codec used for +// marshaling messages. +// This is an EXPERIMENTAL API. +type CustomCodecCallOption struct { + Codec Codec +} + +func (o CustomCodecCallOption) before(c *callInfo) error { + c.codec = o.Codec + return nil +} +func (o CustomCodecCallOption) after(c *callInfo) {} + +// The format of the payload: compressed or not? +type payloadFormat uint8 + +const ( + compressionNone payloadFormat = iota // no compression + compressionMade +) + +// parser reads complete gRPC messages from the underlying reader. +type parser struct { + // r is the underlying reader. + // See the comment on recvMsg for the permissible + // error types. + r io.Reader + + // The header of a gRPC message. Find more detail at + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md + header [5]byte +} + +// recvMsg reads a complete gRPC message from the stream. +// +// It returns the message and its payload (compression/encoding) +// format. The caller owns the returned msg memory. +// +// If there is an error, possible values are: +// * io.EOF, when no messages remain +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +// No other error values or types must be returned, which also means +// that the underlying io.Reader must not return an incompatible +// error. +func (p *parser) recvMsg(maxReceiveMessageSize int) (pf payloadFormat, msg []byte, err error) { + if _, err := p.r.Read(p.header[:]); err != nil { + return 0, nil, err + } + + pf = payloadFormat(p.header[0]) + length := binary.BigEndian.Uint32(p.header[1:]) + + if length == 0 { + return pf, nil, nil + } + if int64(length) > int64(maxInt) { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max length allowed on current machine (%d vs. %d)", length, maxInt) + } + if int(length) > maxReceiveMessageSize { + return 0, nil, status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", length, maxReceiveMessageSize) + } + // TODO(bradfitz,zhaoq): garbage. reuse buffer after proto decoding instead + // of making it for each message: + msg = make([]byte, int(length)) + if _, err := p.r.Read(msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return 0, nil, err + } + return pf, msg, nil +} + +// encode serializes msg and returns a buffer of message header and a buffer of msg. +// If msg is nil, it generates the message header and an empty msg buffer. +// TODO(ddyihai): eliminate extra Compressor parameter. +func encode(c baseCodec, msg interface{}, cp Compressor, outPayload *stats.OutPayload, compressor encoding.Compressor) ([]byte, []byte, error) { + var ( + b []byte + cbuf *bytes.Buffer + ) + const ( + payloadLen = 1 + sizeLen = 4 + ) + if msg != nil { + var err error + b, err = c.Marshal(msg) + if err != nil { + return nil, nil, status.Errorf(codes.Internal, "grpc: error while marshaling: %v", err.Error()) + } + if outPayload != nil { + outPayload.Payload = msg + // TODO truncate large payload. + outPayload.Data = b + outPayload.Length = len(b) + } + if compressor != nil || cp != nil { + cbuf = new(bytes.Buffer) + // Has compressor, check Compressor is set by UseCompressor first. 
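// Note: when both are set, the encoding.Compressor registered via UseCompressor
// takes precedence over the legacy Compressor (cp) handled below.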
+ if compressor != nil { + z, _ := compressor.Compress(cbuf) + if _, err := z.Write(b); err != nil { + return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + z.Close() + } else { + // If Compressor is not set by UseCompressor, use default Compressor + if err := cp.Do(cbuf, b); err != nil { + return nil, nil, status.Errorf(codes.Internal, "grpc: error while compressing: %v", err.Error()) + } + } + b = cbuf.Bytes() + } + } + if uint(len(b)) > math.MaxUint32 { + return nil, nil, status.Errorf(codes.ResourceExhausted, "grpc: message too large (%d bytes)", len(b)) + } + + bufHeader := make([]byte, payloadLen+sizeLen) + if compressor != nil || cp != nil { + bufHeader[0] = byte(compressionMade) + } else { + bufHeader[0] = byte(compressionNone) + } + + // Write length of b into buf + binary.BigEndian.PutUint32(bufHeader[payloadLen:], uint32(len(b))) + if outPayload != nil { + outPayload.WireLength = payloadLen + sizeLen + len(b) + } + return bufHeader, b, nil +} + +func checkRecvPayload(pf payloadFormat, recvCompress string, haveCompressor bool) *status.Status { + switch pf { + case compressionNone: + case compressionMade: + if recvCompress == "" || recvCompress == encoding.Identity { + return status.New(codes.Internal, "grpc: compressed flag set with identity or empty encoding") + } + if !haveCompressor { + return status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", recvCompress) + } + default: + return status.Newf(codes.Internal, "grpc: received unexpected payload format %d", pf) + } + return nil +} + +// For the two compressor parameters, both should not be set, but if they are, +// dc takes precedence over compressor. +// TODO(dfawley): wrap the old compressor/decompressor using the new API? +func recv(p *parser, c baseCodec, s *transport.Stream, dc Decompressor, m interface{}, maxReceiveMessageSize int, inPayload *stats.InPayload, compressor encoding.Compressor) error { + pf, d, err := p.recvMsg(maxReceiveMessageSize) + if err != nil { + return err + } + if inPayload != nil { + inPayload.WireLength = len(d) + } + + if st := checkRecvPayload(pf, s.RecvCompress(), compressor != nil || dc != nil); st != nil { + return st.Err() + } + + if pf == compressionMade { + // To match legacy behavior, if the decompressor is set by WithDecompressor or RPCDecompressor, + // use this decompressor as the default. + if dc != nil { + d, err = dc.Do(bytes.NewReader(d)) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } else { + dcReader, err := compressor.Decompress(bytes.NewReader(d)) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + d, err = ioutil.ReadAll(dcReader) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + } + if len(d) > maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with java + // implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(d), maxReceiveMessageSize) + } + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "grpc: failed to unmarshal the received message %v", err) + } + if inPayload != nil { + inPayload.RecvTime = time.Now() + inPayload.Payload = m + // TODO truncate large payload. 
+ inPayload.Data = d + inPayload.Length = len(d) + } + return nil +} + +type rpcInfo struct { + failfast bool +} + +type rpcInfoContextKey struct{} + +func newContextWithRPCInfo(ctx context.Context, failfast bool) context.Context { + return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{failfast: failfast}) +} + +func rpcInfoFromContext(ctx context.Context) (s *rpcInfo, ok bool) { + s, ok = ctx.Value(rpcInfoContextKey{}).(*rpcInfo) + return +} + +// Code returns the error code for err if it was produced by the rpc system. +// Otherwise, it returns codes.Unknown. +// +// Deprecated: use status.FromError and Code method instead. +func Code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +// ErrorDesc returns the error description of err if it was produced by the rpc system. +// Otherwise, it returns err.Error() or empty string when err is nil. +// +// Deprecated: use status.FromError and Message method instead. +func ErrorDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} + +// Errorf returns an error containing an error code and a description; +// Errorf returns nil if c is OK. +// +// Deprecated: use status.Errorf instead. +func Errorf(c codes.Code, format string, a ...interface{}) error { + return status.Errorf(c, format, a...) +} + +// setCallInfoCodec should only be called after CallOptions have been applied. +func setCallInfoCodec(c *callInfo) error { + if c.codec != nil { + // codec was already set by a CallOption; use it. + return nil + } + + if c.contentSubtype == "" { + // No codec specified in CallOptions; use proto by default. + c.codec = encoding.GetCodec(proto.Name) + return nil + } + + // c.contentSubtype is already lowercased in CallContentSubtype + c.codec = encoding.GetCodec(c.contentSubtype) + if c.codec == nil { + return status.Errorf(codes.Internal, "no codec registered for content-subtype %s", c.contentSubtype) + } + return nil +} + +// parseDialTarget returns the network and address to pass to dialer +func parseDialTarget(target string) (net string, addr string) { + net = "tcp" + + m1 := strings.Index(target, ":") + m2 := strings.Index(target, ":/") + + // handle unix:addr which will fail with url.Parse + if m1 >= 0 && m2 < 0 { + if n := target[0:m1]; n == "unix" { + net = n + addr = target[m1+1:] + return net, addr + } + } + if m2 >= 0 { + t, err := url.Parse(target) + if err != nil { + return net, target + } + scheme := t.Scheme + addr = t.Path + if scheme == "unix" { + net = scheme + if addr == "" { + addr = t.Host + } + return net, addr + } + } + + return net, target +} + +// The SupportPackageIsVersion variables are referenced from generated protocol +// buffer files to ensure compatibility with the gRPC version used. The latest +// support package version is 5. +// +// Older versions are kept for compatibility. They may be removed if +// compatibility cannot be maintained. +// +// These constants should not be referenced from any other code. +const ( + SupportPackageIsVersion3 = true + SupportPackageIsVersion4 = true + SupportPackageIsVersion5 = true +) + +// Version is the current grpc version. +const Version = "1.12.2" + +const grpcUA = "grpc-go/" + Version diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go new file mode 100644 index 00000000000..4969331cb3d --- /dev/null +++ b/vendor/google.golang.org/grpc/server.go @@ -0,0 +1,1486 @@ +/* + * + * Copyright 2014 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "net" + "net/http" + "reflect" + "runtime" + "strings" + "sync" + "time" + + "io/ioutil" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/trace" + + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/encoding/proto" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/internal" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" + "google.golang.org/grpc/transport" +) + +const ( + defaultServerMaxReceiveMessageSize = 1024 * 1024 * 4 + defaultServerMaxSendMessageSize = math.MaxInt32 +) + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor UnaryServerInterceptor) (interface{}, error) + +// MethodDesc represents an RPC service's method specification. +type MethodDesc struct { + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +// service consists of the information of the server serving this service and +// the methods in this service. +type service struct { + server interface{} // the server for service methods + md map[string]*MethodDesc + sd map[string]*StreamDesc + mdata interface{} +} + +// Server is a gRPC server to serve RPC requests. 
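// A minimal usage sketch (pb.RegisterGreeterServer and greeterServer are illustrative
// placeholders for generated registration code and a user-provided implementation):
//
//    lis, err := net.Listen("tcp", ":50051")
//    if err != nil {
//        // handle listen error
//    }
//    s := grpc.NewServer()
//    pb.RegisterGreeterServer(s, &greeterServer{})
//    if err := s.Serve(lis); err != nil {
//        // Serve returns a non-nil error unless Stop or GracefulStop was called
//    }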
+type Server struct { + opts options + + mu sync.Mutex // guards following + lis map[net.Listener]bool + conns map[io.Closer]bool + serve bool + drain bool + cv *sync.Cond // signaled when connections close for GracefulStop + m map[string]*service // service name -> service info + events trace.EventLog + + quit chan struct{} + done chan struct{} + quitOnce sync.Once + doneOnce sync.Once + channelzRemoveOnce sync.Once + serveWG sync.WaitGroup // counts active Serve goroutines for GracefulStop + + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + callsStarted int64 + callsFailed int64 + callsSucceeded int64 + lastCallStartedTime time.Time +} + +type options struct { + creds credentials.TransportCredentials + codec baseCodec + cp Compressor + dc Decompressor + unaryInt UnaryServerInterceptor + streamInt StreamServerInterceptor + inTapHandle tap.ServerInHandle + statsHandler stats.Handler + maxConcurrentStreams uint32 + maxReceiveMessageSize int + maxSendMessageSize int + useHandlerImpl bool // use http.Handler-based server + unknownStreamDesc *StreamDesc + keepaliveParams keepalive.ServerParameters + keepalivePolicy keepalive.EnforcementPolicy + initialWindowSize int32 + initialConnWindowSize int32 + writeBufferSize int + readBufferSize int + connectionTimeout time.Duration +} + +var defaultServerOptions = options{ + maxReceiveMessageSize: defaultServerMaxReceiveMessageSize, + maxSendMessageSize: defaultServerMaxSendMessageSize, + connectionTimeout: 120 * time.Second, +} + +// A ServerOption sets options such as credentials, codec and keepalive parameters, etc. +type ServerOption func(*options) + +// WriteBufferSize lets you set the size of write buffer, this determines how much data can be batched +// before doing a write on the wire. +func WriteBufferSize(s int) ServerOption { + return func(o *options) { + o.writeBufferSize = s + } +} + +// ReadBufferSize lets you set the size of read buffer, this determines how much data can be read at most +// for one read syscall. +func ReadBufferSize(s int) ServerOption { + return func(o *options) { + o.readBufferSize = s + } +} + +// InitialWindowSize returns a ServerOption that sets window size for stream. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialWindowSize = s + } +} + +// InitialConnWindowSize returns a ServerOption that sets window size for a connection. +// The lower bound for window size is 64K and any value smaller than that will be ignored. +func InitialConnWindowSize(s int32) ServerOption { + return func(o *options) { + o.initialConnWindowSize = s + } +} + +// KeepaliveParams returns a ServerOption that sets keepalive and max-age parameters for the server. +func KeepaliveParams(kp keepalive.ServerParameters) ServerOption { + return func(o *options) { + o.keepaliveParams = kp + } +} + +// KeepaliveEnforcementPolicy returns a ServerOption that sets keepalive enforcement policy for the server. +func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption { + return func(o *options) { + o.keepalivePolicy = kep + } +} + +// CustomCodec returns a ServerOption that sets a codec for message marshaling and unmarshaling. +// +// This will override any lookups by content-subtype for Codecs registered with RegisterCodec. 
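// An illustrative sketch (jsonCodec is a placeholder type assumed to implement
// the Codec interface, i.e. Marshal, Unmarshal and String):
//
//    s := grpc.NewServer(grpc.CustomCodec(jsonCodec{}))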
+func CustomCodec(codec Codec) ServerOption { + return func(o *options) { + o.codec = codec + } +} + +// RPCCompressor returns a ServerOption that sets a compressor for outbound +// messages. For backward compatibility, all outbound messages will be sent +// using this compressor, regardless of incoming message compression. By +// default, server messages will be sent using the same compressor with which +// request messages were sent. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCCompressor(cp Compressor) ServerOption { + return func(o *options) { + o.cp = cp + } +} + +// RPCDecompressor returns a ServerOption that sets a decompressor for inbound +// messages. It has higher priority than decompressors registered via +// encoding.RegisterCompressor. +// +// Deprecated: use encoding.RegisterCompressor instead. +func RPCDecompressor(dc Decompressor) ServerOption { + return func(o *options) { + o.dc = dc + } +} + +// MaxMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default limit. +// +// Deprecated: use MaxRecvMsgSize instead. +func MaxMsgSize(m int) ServerOption { + return MaxRecvMsgSize(m) +} + +// MaxRecvMsgSize returns a ServerOption to set the max message size in bytes the server can receive. +// If this is not set, gRPC uses the default 4MB. +func MaxRecvMsgSize(m int) ServerOption { + return func(o *options) { + o.maxReceiveMessageSize = m + } +} + +// MaxSendMsgSize returns a ServerOption to set the max message size in bytes the server can send. +// If this is not set, gRPC uses the default 4MB. +func MaxSendMsgSize(m int) ServerOption { + return func(o *options) { + o.maxSendMessageSize = m + } +} + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number +// of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return func(o *options) { + o.maxConcurrentStreams = n + } +} + +// Creds returns a ServerOption that sets credentials for server connections. +func Creds(c credentials.TransportCredentials) ServerOption { + return func(o *options) { + o.creds = c + } +} + +// UnaryInterceptor returns a ServerOption that sets the UnaryServerInterceptor for the +// server. Only one unary interceptor can be installed. The construction of multiple +// interceptors (e.g., chaining) can be implemented at the caller. +func UnaryInterceptor(i UnaryServerInterceptor) ServerOption { + return func(o *options) { + if o.unaryInt != nil { + panic("The unary server interceptor was already set and may not be reset.") + } + o.unaryInt = i + } +} + +// StreamInterceptor returns a ServerOption that sets the StreamServerInterceptor for the +// server. Only one stream interceptor can be installed. +func StreamInterceptor(i StreamServerInterceptor) ServerOption { + return func(o *options) { + if o.streamInt != nil { + panic("The stream server interceptor was already set and may not be reset.") + } + o.streamInt = i + } +} + +// InTapHandle returns a ServerOption that sets the tap handle for all the server +// transport to be created. Only one can be installed. +func InTapHandle(h tap.ServerInHandle) ServerOption { + return func(o *options) { + if o.inTapHandle != nil { + panic("The tap handle was already set and may not be reset.") + } + o.inTapHandle = h + } +} + +// StatsHandler returns a ServerOption that sets the stats handler for the server. 
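// An illustrative sketch (latencyHandler is a placeholder type assumed to implement
// the stats.Handler interface):
//
//    s := grpc.NewServer(grpc.StatsHandler(&latencyHandler{}))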
+func StatsHandler(h stats.Handler) ServerOption { + return func(o *options) { + o.statsHandler = h + } +} + +// UnknownServiceHandler returns a ServerOption that allows for adding a custom +// unknown service handler. The provided method is a bidi-streaming RPC service +// handler that will be invoked instead of returning the "unimplemented" gRPC +// error whenever a request is received for an unregistered service or method. +// The handling function has full access to the Context of the request and the +// stream, and the invocation bypasses interceptors. +func UnknownServiceHandler(streamHandler StreamHandler) ServerOption { + return func(o *options) { + o.unknownStreamDesc = &StreamDesc{ + StreamName: "unknown_service_handler", + Handler: streamHandler, + // We need to assume that the users of the streamHandler will want to use both. + ClientStreams: true, + ServerStreams: true, + } + } +} + +// ConnectionTimeout returns a ServerOption that sets the timeout for +// connection establishment (up to and including HTTP/2 handshaking) for all +// new connections. If this is not set, the default is 120 seconds. A zero or +// negative value will result in an immediate timeout. +// +// This API is EXPERIMENTAL. +func ConnectionTimeout(d time.Duration) ServerOption { + return func(o *options) { + o.connectionTimeout = d + } +} + +// NewServer creates a gRPC server which has no service registered and has not +// started to accept requests yet. +func NewServer(opt ...ServerOption) *Server { + opts := defaultServerOptions + for _, o := range opt { + o(&opts) + } + s := &Server{ + lis: make(map[net.Listener]bool), + opts: opts, + conns: make(map[io.Closer]bool), + m: make(map[string]*service), + quit: make(chan struct{}), + done: make(chan struct{}), + } + s.cv = sync.NewCond(&s.mu) + if EnableTracing { + _, file, line, _ := runtime.Caller(1) + s.events = trace.NewEventLog("grpc.Server", fmt.Sprintf("%s:%d", file, line)) + } + + if channelz.IsOn() { + s.channelzID = channelz.RegisterServer(s, "") + } + return s +} + +// printf records an event in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) printf(format string, a ...interface{}) { + if s.events != nil { + s.events.Printf(format, a...) + } +} + +// errorf records an error in s's event log, unless s has been stopped. +// REQUIRES s.mu is held. +func (s *Server) errorf(format string, a ...interface{}) { + if s.events != nil { + s.events.Errorf(format, a...) + } +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + grpclog.Fatalf("grpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + s.mu.Lock() + defer s.mu.Unlock() + s.printf("RegisterService(%q)", sd.ServiceName) + if s.serve { + grpclog.Fatalf("grpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.m[sd.ServiceName]; ok { + grpclog.Fatalf("grpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + srv := &service{ + server: ss, + md: make(map[string]*MethodDesc), + sd: make(map[string]*StreamDesc), + mdata: sd.Metadata, + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.MethodName] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.StreamName] = d + } + s.m[sd.ServiceName] = srv +} + +// MethodInfo contains the information of an RPC including its method name and type. +type MethodInfo struct { + // Name is the method name only, without the service name or package name. + Name string + // IsClientStream indicates whether the RPC is a client streaming RPC. + IsClientStream bool + // IsServerStream indicates whether the RPC is a server streaming RPC. + IsServerStream bool +} + +// ServiceInfo contains unary RPC method info, streaming RPC method info and metadata for a service. +type ServiceInfo struct { + Methods []MethodInfo + // Metadata is the metadata specified in ServiceDesc when registering service. + Metadata interface{} +} + +// GetServiceInfo returns a map from service names to ServiceInfo. +// Service names include the package names, in the form of .. +func (s *Server) GetServiceInfo() map[string]ServiceInfo { + ret := make(map[string]ServiceInfo) + for n, srv := range s.m { + methods := make([]MethodInfo, 0, len(srv.md)+len(srv.sd)) + for m := range srv.md { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: false, + IsServerStream: false, + }) + } + for m, d := range srv.sd { + methods = append(methods, MethodInfo{ + Name: m, + IsClientStream: d.ClientStreams, + IsServerStream: d.ServerStreams, + }) + } + + ret[n] = ServiceInfo{ + Methods: methods, + Metadata: srv.mdata, + } + } + return ret +} + +// ErrServerStopped indicates that the operation is now illegal because of +// the server being stopped. +var ErrServerStopped = errors.New("grpc: the server has been stopped") + +func (s *Server) useTransportAuthenticator(rawConn net.Conn) (net.Conn, credentials.AuthInfo, error) { + if s.opts.creds == nil { + return rawConn, nil, nil + } + return s.opts.creds.ServerHandshake(rawConn) +} + +type listenSocket struct { + net.Listener + channelzID int64 +} + +func (l *listenSocket) ChannelzMetric() *channelz.SocketInternalMetric { + return &channelz.SocketInternalMetric{ + LocalAddr: l.Listener.Addr(), + } +} + +func (l *listenSocket) Close() error { + err := l.Listener.Close() + if channelz.IsOn() { + channelz.RemoveEntry(l.channelzID) + } + return err +} + +// Serve accepts incoming connections on the listener lis, creating a new +// ServerTransport and service goroutine for each. The service goroutines +// read gRPC requests and then call the registered handlers to reply to them. +// Serve returns when lis.Accept fails with fatal errors. lis will be closed when +// this method returns. 
+// Serve will return a non-nil error unless Stop or GracefulStop is called. +func (s *Server) Serve(lis net.Listener) error { + s.mu.Lock() + s.printf("serving") + s.serve = true + if s.lis == nil { + // Serve called after Stop or GracefulStop. + s.mu.Unlock() + lis.Close() + return ErrServerStopped + } + + s.serveWG.Add(1) + defer func() { + s.serveWG.Done() + select { + // Stop or GracefulStop called; block until done and return nil. + case <-s.quit: + <-s.done + default: + } + }() + + ls := &listenSocket{Listener: lis} + s.lis[ls] = true + + if channelz.IsOn() { + ls.channelzID = channelz.RegisterListenSocket(ls, s.channelzID, "") + } + s.mu.Unlock() + + defer func() { + s.mu.Lock() + if s.lis != nil && s.lis[ls] { + ls.Close() + delete(s.lis, ls) + } + s.mu.Unlock() + }() + + var tempDelay time.Duration // how long to sleep on accept failure + + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + if tempDelay == 0 { + tempDelay = 5 * time.Millisecond + } else { + tempDelay *= 2 + } + if max := 1 * time.Second; tempDelay > max { + tempDelay = max + } + s.mu.Lock() + s.printf("Accept error: %v; retrying in %v", err, tempDelay) + s.mu.Unlock() + timer := time.NewTimer(tempDelay) + select { + case <-timer.C: + case <-s.quit: + timer.Stop() + return nil + } + continue + } + s.mu.Lock() + s.printf("done serving; Accept = %v", err) + s.mu.Unlock() + + select { + case <-s.quit: + return nil + default: + } + return err + } + tempDelay = 0 + // Start a new goroutine to deal with rawConn so we don't stall this Accept + // loop goroutine. + // + // Make sure we account for the goroutine so GracefulStop doesn't nil out + // s.conns before this conn can be added. + s.serveWG.Add(1) + go func() { + s.handleRawConn(rawConn) + s.serveWG.Done() + }() + } +} + +// handleRawConn forks a goroutine to handle a just-accepted connection that +// has not had any I/O performed on it yet. +func (s *Server) handleRawConn(rawConn net.Conn) { + rawConn.SetDeadline(time.Now().Add(s.opts.connectionTimeout)) + conn, authInfo, err := s.useTransportAuthenticator(rawConn) + if err != nil { + s.mu.Lock() + s.errorf("ServerHandshake(%q) failed: %v", rawConn.RemoteAddr(), err) + s.mu.Unlock() + grpclog.Warningf("grpc: Server.Serve failed to complete security handshake from %q: %v", rawConn.RemoteAddr(), err) + // If serverHandshake returns ErrConnDispatched, keep rawConn open. + if err != credentials.ErrConnDispatched { + rawConn.Close() + } + rawConn.SetDeadline(time.Time{}) + return + } + + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + conn.Close() + return + } + s.mu.Unlock() + + var serve func() + c := conn.(io.Closer) + if s.opts.useHandlerImpl { + serve = func() { s.serveUsingHandler(conn) } + } else { + // Finish handshaking (HTTP2) + st := s.newHTTP2Transport(conn, authInfo) + if st == nil { + return + } + c = st + serve = func() { s.serveStreams(st) } + } + + rawConn.SetDeadline(time.Time{}) + if !s.addConn(c) { + return + } + go func() { + serve() + s.removeConn(c) + }() +} + +// newHTTP2Transport sets up a http/2 transport (using the +// gRPC http2 server transport in transport/http2_server.go). 
+func (s *Server) newHTTP2Transport(c net.Conn, authInfo credentials.AuthInfo) transport.ServerTransport { + config := &transport.ServerConfig{ + MaxStreams: s.opts.maxConcurrentStreams, + AuthInfo: authInfo, + InTapHandle: s.opts.inTapHandle, + StatsHandler: s.opts.statsHandler, + KeepaliveParams: s.opts.keepaliveParams, + KeepalivePolicy: s.opts.keepalivePolicy, + InitialWindowSize: s.opts.initialWindowSize, + InitialConnWindowSize: s.opts.initialConnWindowSize, + WriteBufferSize: s.opts.writeBufferSize, + ReadBufferSize: s.opts.readBufferSize, + ChannelzParentID: s.channelzID, + } + st, err := transport.NewServerTransport("http2", c, config) + if err != nil { + s.mu.Lock() + s.errorf("NewServerTransport(%q) failed: %v", c.RemoteAddr(), err) + s.mu.Unlock() + c.Close() + grpclog.Warningln("grpc: Server.Serve failed to create ServerTransport: ", err) + return nil + } + + return st +} + +func (s *Server) serveStreams(st transport.ServerTransport) { + defer st.Close() + var wg sync.WaitGroup + st.HandleStreams(func(stream *transport.Stream) { + wg.Add(1) + go func() { + defer wg.Done() + s.handleStream(st, stream, s.traceInfo(st, stream)) + }() + }, func(ctx context.Context, method string) context.Context { + if !EnableTracing { + return ctx + } + tr := trace.New("grpc.Recv."+methodFamily(method), method) + return trace.NewContext(ctx, tr) + }) + wg.Wait() +} + +var _ http.Handler = (*Server)(nil) + +// serveUsingHandler is called from handleRawConn when s is configured +// to handle requests via the http.Handler interface. It sets up a +// net/http.Server to handle the just-accepted conn. The http.Server +// is configured to route all incoming requests (all HTTP/2 streams) +// to ServeHTTP, which creates a new ServerTransport for each stream. +// serveUsingHandler blocks until conn closes. +// +// This codepath is only used when Server.TestingUseHandlerImpl has +// been configured. This lets the end2end tests exercise the ServeHTTP +// method as one of the environment types. +// +// conn is the *tls.Conn that's already been authenticated. +func (s *Server) serveUsingHandler(conn net.Conn) { + h2s := &http2.Server{ + MaxConcurrentStreams: s.opts.maxConcurrentStreams, + } + h2s.ServeConn(conn, &http2.ServeConnOpts{ + Handler: s, + }) +} + +// ServeHTTP implements the Go standard library's http.Handler +// interface by responding to the gRPC request r, by looking up +// the requested gRPC method in the gRPC server s. +// +// The provided HTTP request must have arrived on an HTTP/2 +// connection. When using the Go standard library's server, +// practically this means that the Request must also have arrived +// over TLS. +// +// To share one port (such as 443 for https) between gRPC and an +// existing http.Handler, use a root http.Handler such as: +// +// if r.ProtoMajor == 2 && strings.HasPrefix( +// r.Header.Get("Content-Type"), "application/grpc") { +// grpcServer.ServeHTTP(w, r) +// } else { +// yourMux.ServeHTTP(w, r) +// } +// +// Note that ServeHTTP uses Go's HTTP/2 server implementation which is totally +// separate from grpc-go's HTTP/2 server. Performance and features may vary +// between the two paths. ServeHTTP does not support some gRPC features +// available through grpc-go's HTTP/2 server, and it is currently EXPERIMENTAL +// and subject to change. 
+func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { + st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandler) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + if !s.addConn(st) { + return + } + defer s.removeConn(st) + s.serveStreams(st) +} + +// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. +// If tracing is not enabled, it returns nil. +func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { + tr, ok := trace.FromContext(stream.Context()) + if !ok { + return nil + } + + trInfo = &traceInfo{ + tr: tr, + } + trInfo.firstLine.client = false + trInfo.firstLine.remoteAddr = st.RemoteAddr() + + if dl, ok := stream.Context().Deadline(); ok { + trInfo.firstLine.deadline = dl.Sub(time.Now()) + } + return trInfo +} + +func (s *Server) addConn(c io.Closer) bool { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns == nil { + c.Close() + return false + } + if s.drain { + // Transport added after we drained our existing conns: drain it + // immediately. + c.(transport.ServerTransport).Drain() + } + s.conns[c] = true + return true +} + +func (s *Server) removeConn(c io.Closer) { + s.mu.Lock() + defer s.mu.Unlock() + if s.conns != nil { + delete(s.conns, c) + s.cv.Broadcast() + } +} + +// ChannelzMetric returns ServerInternalMetric of current server. +// This is an EXPERIMENTAL API. +func (s *Server) ChannelzMetric() *channelz.ServerInternalMetric { + s.czmu.RLock() + defer s.czmu.RUnlock() + return &channelz.ServerInternalMetric{ + CallsStarted: s.callsStarted, + CallsSucceeded: s.callsSucceeded, + CallsFailed: s.callsFailed, + LastCallStartedTimestamp: s.lastCallStartedTime, + } +} + +func (s *Server) incrCallsStarted() { + s.czmu.Lock() + s.callsStarted++ + s.lastCallStartedTime = time.Now() + s.czmu.Unlock() +} + +func (s *Server) incrCallsSucceeded() { + s.czmu.Lock() + s.callsSucceeded++ + s.czmu.Unlock() +} + +func (s *Server) incrCallsFailed() { + s.czmu.Lock() + s.callsFailed++ + s.czmu.Unlock() +} + +func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg interface{}, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { + var ( + outPayload *stats.OutPayload + ) + if s.opts.statsHandler != nil { + outPayload = &stats.OutPayload{} + } + hdr, data, err := encode(s.getCodec(stream.ContentSubtype()), msg, cp, outPayload, comp) + if err != nil { + grpclog.Errorln("grpc: server failed to encode response: ", err) + return err + } + if len(data) > s.opts.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. 
%d)", len(data), s.opts.maxSendMessageSize) + } + err = t.Write(stream, hdr, data, opts) + if err == nil && outPayload != nil { + outPayload.SentTime = time.Now() + s.opts.statsHandler.HandleRPC(stream.Context(), outPayload) + } + return err +} + +func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, md *MethodDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } + sh := s.opts.statsHandler + if sh != nil { + beginTime := time.Now() + begin := &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + BeginTime: beginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + if trInfo != nil { + defer trInfo.tr.Finish() + trInfo.firstLine.client = false + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + if err != nil && err != io.EOF { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + }() + } + + // comp and cp are used for compression. decomp and dc are used for + // decompression. If comp and decomp are both set, they are the same; + // however they are kept separate to ensure that at most one of the + // compressor/decompressor variable pairs are set for use later. + var comp, decomp encoding.Compressor + var cp Compressor + var dc Decompressor + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + decomp = encoding.GetCompressor(rc) + if decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(stream, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + cp = s.opts.cp + stream.SetSendCompress(cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. + comp = encoding.GetCompressor(rc) + if comp != nil { + stream.SetSendCompress(rc) + } + } + + p := &parser{r: stream} + pf, req, err := p.recvMsg(s.opts.maxReceiveMessageSize) + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + if err != nil { + if st, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from recvMsg: %v", st, st)) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgRecv() + } + if st := checkRecvPayload(pf, stream.RecvCompress(), dc != nil || decomp != nil); st != nil { + if e := t.WriteStatus(stream, st); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + return st.Err() + } + var inPayload *stats.InPayload + if sh != nil { + inPayload = &stats.InPayload{ + RecvTime: time.Now(), + } + } + df := func(v interface{}) error { + if inPayload != nil { + inPayload.WireLength = len(req) + } + if pf == compressionMade { + var err error + if dc != nil { + req, err = dc.Do(bytes.NewReader(req)) + if err != nil { + return status.Errorf(codes.Internal, err.Error()) + } + } else { + tmp, _ := decomp.Decompress(bytes.NewReader(req)) + req, err = ioutil.ReadAll(tmp) + if err != nil { + return status.Errorf(codes.Internal, "grpc: failed to decompress the received message %v", err) + } + } + } + if len(req) > s.opts.maxReceiveMessageSize { + // TODO: Revisit the error code. Currently keep it consistent with + // java implementation. + return status.Errorf(codes.ResourceExhausted, "grpc: received message larger than max (%d vs. %d)", len(req), s.opts.maxReceiveMessageSize) + } + if err := s.getCodec(stream.ContentSubtype()).Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) + } + if inPayload != nil { + inPayload.Payload = v + inPayload.Data = req + inPayload.Length = len(req) + sh.HandleRPC(stream.Context(), inPayload) + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: false, msg: v}, true) + } + return nil + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + reply, appErr := md.Handler(srv.server, ctx, df, s.opts.unaryInt) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // Convert appErr if it is not a grpc status error. + appErr = status.Error(codes.Unknown, appErr.Error()) + appStatus, _ = status.FromError(appErr) + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + trInfo.tr.SetError() + } + if e := t.WriteStatus(stream, appStatus); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + return appErr + } + if trInfo != nil { + trInfo.tr.LazyLog(stringer("OK"), false) + } + opts := &transport.Options{ + Last: true, + Delay: false, + } + + if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err == io.EOF { + // The entire stream is done (for unary RPC only). + return err + } + if s, ok := status.FromError(err); ok { + if e := t.WriteStatus(stream, s); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status: %v", e) + } + } else { + switch st := err.(type) { + case transport.ConnectionError: + // Nothing to do here. 
+ case transport.StreamError: + if e := t.WriteStatus(stream, status.New(st.Code, st.Desc)); e != nil { + grpclog.Warningf("grpc: Server.processUnaryRPC failed to write status %v", e) + } + default: + panic(fmt.Sprintf("grpc: Unexpected error (%T) from sendResponse: %v", st, st)) + } + } + return err + } + if channelz.IsOn() { + t.IncrMsgSent() + } + if trInfo != nil { + trInfo.tr.LazyLog(&payload{sent: true, msg: reply}, true) + } + // TODO: Should we be logging if writing status failed here, like above? + // Should the logging be in WriteStatus? Should we ignore the WriteStatus + // error or allow the stats handler to see it? + return t.WriteStatus(stream, status.New(codes.OK, "")) +} + +func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, srv *service, sd *StreamDesc, trInfo *traceInfo) (err error) { + if channelz.IsOn() { + s.incrCallsStarted() + defer func() { + if err != nil && err != io.EOF { + s.incrCallsFailed() + } else { + s.incrCallsSucceeded() + } + }() + } + sh := s.opts.statsHandler + if sh != nil { + beginTime := time.Now() + begin := &stats.Begin{ + BeginTime: beginTime, + } + sh.HandleRPC(stream.Context(), begin) + defer func() { + end := &stats.End{ + BeginTime: beginTime, + EndTime: time.Now(), + } + if err != nil && err != io.EOF { + end.Error = toRPCErr(err) + } + sh.HandleRPC(stream.Context(), end) + }() + } + ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ss := &serverStream{ + ctx: ctx, + t: t, + s: stream, + p: &parser{r: stream}, + codec: s.getCodec(stream.ContentSubtype()), + maxReceiveMessageSize: s.opts.maxReceiveMessageSize, + maxSendMessageSize: s.opts.maxSendMessageSize, + trInfo: trInfo, + statsHandler: sh, + } + + // If dc is set and matches the stream's compression, use it. Otherwise, try + // to find a matching registered compressor for decomp. + if rc := stream.RecvCompress(); s.opts.dc != nil && s.opts.dc.Type() == rc { + ss.dc = s.opts.dc + } else if rc != "" && rc != encoding.Identity { + ss.decomp = encoding.GetCompressor(rc) + if ss.decomp == nil { + st := status.Newf(codes.Unimplemented, "grpc: Decompressor is not installed for grpc-encoding %q", rc) + t.WriteStatus(ss.s, st) + return st.Err() + } + } + + // If cp is set, use it. Otherwise, attempt to compress the response using + // the incoming message compression method. + // + // NOTE: this needs to be ahead of all handling, https://github.com/grpc/grpc-go/issues/686. + if s.opts.cp != nil { + ss.cp = s.opts.cp + stream.SetSendCompress(s.opts.cp.Type()) + } else if rc := stream.RecvCompress(); rc != "" && rc != encoding.Identity { + // Legacy compressor not specified; attempt to respond with same encoding. 
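// Illustrative sketch, not part of the vendored grpc-go sources: how the
// "respond with the same encoding" selection above is usually triggered. The
// client asks for gzip with the UseCompressor call option; importing the gzip
// sub-package registers the compressor, and a server without an explicit
// legacy compressor mirrors the request's encoding. The method name and the
// message types are placeholders.
package clientutil

import (
	"golang.org/x/net/context"

	"google.golang.org/grpc"
	"google.golang.org/grpc/encoding/gzip" // registers the "gzip" compressor
)

func invokeWithGzip(ctx context.Context, cc *grpc.ClientConn, req, resp interface{}) error {
	// The request is sent gzip-compressed; per the selection logic above the
	// server also compresses its response with gzip.
	return cc.Invoke(ctx, "/pkg.Service/Method", req, resp, grpc.UseCompressor(gzip.Name))
}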
+ ss.comp = encoding.GetCompressor(rc) + if ss.comp != nil { + stream.SetSendCompress(rc) + } + } + + if trInfo != nil { + trInfo.tr.LazyLog(&trInfo.firstLine, false) + defer func() { + ss.mu.Lock() + if err != nil && err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + ss.trInfo.tr.Finish() + ss.trInfo.tr = nil + ss.mu.Unlock() + }() + } + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + if s.opts.streamInt == nil { + appErr = sd.Handler(server, ss) + } else { + info := &StreamServerInfo{ + FullMethod: stream.Method(), + IsClientStream: sd.ClientStreams, + IsServerStream: sd.ServerStreams, + } + appErr = s.opts.streamInt(server, ss, info, sd.Handler) + } + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + switch err := appErr.(type) { + case transport.StreamError: + appStatus = status.New(err.Code, err.Desc) + default: + appStatus = status.New(codes.Unknown, appErr.Error()) + } + appErr = appStatus.Err() + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer(appStatus.Message()), true) + ss.trInfo.tr.SetError() + ss.mu.Unlock() + } + t.WriteStatus(ss.s, appStatus) + // TODO: Should we log an error from WriteStatus here and below? + return appErr + } + if trInfo != nil { + ss.mu.Lock() + ss.trInfo.tr.LazyLog(stringer("OK"), false) + ss.mu.Unlock() + } + return t.WriteStatus(ss.s, status.New(codes.OK, "")) +} + +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { + sm := stream.Method() + if sm != "" && sm[0] == '/' { + sm = sm[1:] + } + pos := strings.LastIndex(sm, "/") + if pos == -1 { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []interface{}{sm}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) + if err := t.WriteStatus(stream, status.New(codes.ResourceExhausted, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + service := sm[:pos] + method := sm[pos+1:] + srv, ok := s.m[service] + if !ok { + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown service %v", []interface{}{service}}, true) + trInfo.tr.SetError() + } + errDesc := fmt.Sprintf("unknown service %v", service) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } + return + } + // Unary RPC or Streaming RPC? 
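// Illustrative sketch, not part of the vendored grpc-go sources: the unaryInt
// and streamInt hooks invoked above are installed through server options. A
// minimal logging interceptor pair; the log format is an arbitrary choice.
package serverutil

import (
	"log"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
)

func loggingUnary(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
	start := time.Now()
	resp, err := handler(ctx, req) // run the actual method handler
	log.Printf("unary %s took %v err=%v", info.FullMethod, time.Since(start), err)
	return resp, err
}

func loggingStream(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
	err := handler(srv, ss)
	log.Printf("stream %s err=%v", info.FullMethod, err)
	return err
}

func newServer() *grpc.Server {
	return grpc.NewServer(
		grpc.UnaryInterceptor(loggingUnary),
		grpc.StreamInterceptor(loggingStream),
	)
}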
+ if md, ok := srv.md[method]; ok { + s.processUnaryRPC(t, stream, srv, md, trInfo) + return + } + if sd, ok := srv.sd[method]; ok { + s.processStreamingRPC(t, stream, srv, sd, trInfo) + return + } + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"Unknown method %v", []interface{}{method}}, true) + trInfo.tr.SetError() + } + if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { + s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + return + } + errDesc := fmt.Sprintf("unknown method %v", method) + if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { + if trInfo != nil { + trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + trInfo.tr.SetError() + } + grpclog.Warningf("grpc: Server.handleStream failed to write status: %v", err) + } + if trInfo != nil { + trInfo.tr.Finish() + } +} + +// The key to save ServerTransportStream in the context. +type streamKey struct{} + +// NewContextWithServerTransportStream creates a new context from ctx and +// attaches stream to it. +// +// This API is EXPERIMENTAL. +func NewContextWithServerTransportStream(ctx context.Context, stream ServerTransportStream) context.Context { + return context.WithValue(ctx, streamKey{}, stream) +} + +// ServerTransportStream is a minimal interface that a transport stream must +// implement. This can be used to mock an actual transport stream for tests of +// handler code that use, for example, grpc.SetHeader (which requires some +// stream to be in context). +// +// See also NewContextWithServerTransportStream. +// +// This API is EXPERIMENTAL. +type ServerTransportStream interface { + Method() string + SetHeader(md metadata.MD) error + SendHeader(md metadata.MD) error + SetTrailer(md metadata.MD) error +} + +// ServerTransportStreamFromContext returns the ServerTransportStream saved in +// ctx. Returns nil if the given context has no stream associated with it +// (which implies it is not an RPC invocation context). +// +// This API is EXPERIMENTAL. +func ServerTransportStreamFromContext(ctx context.Context) ServerTransportStream { + s, _ := ctx.Value(streamKey{}).(ServerTransportStream) + return s +} + +// Stop stops the gRPC server. It immediately closes all open +// connections and listeners. +// It cancels all active RPCs on the server side and the corresponding +// pending RPCs on the client side will get notified by connection +// errors. +func (s *Server) Stop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.serveWG.Wait() + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + + s.mu.Lock() + listeners := s.lis + s.lis = nil + st := s.conns + s.conns = nil + // interrupt GracefulStop if Stop and GracefulStop are called concurrently. + s.cv.Broadcast() + s.mu.Unlock() + + for lis := range listeners { + lis.Close() + } + for c := range st { + c.Close() + } + + s.mu.Lock() + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +// GracefulStop stops the gRPC server gracefully. It stops the server from +// accepting new connections and RPCs and blocks until all the pending RPCs are +// finished. 
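// Illustrative sketch, not part of the vendored grpc-go sources: using
// NewContextWithServerTransportStream to test code that calls grpc.SetHeader,
// as the ServerTransportStream comment above suggests. fakeStream and the test
// body are assumptions for the example.
package example

import (
	"testing"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// fakeStream records the headers a handler sets and satisfies
// grpc.ServerTransportStream.
type fakeStream struct {
	header metadata.MD
}

func (f *fakeStream) Method() string { return "/test.Service/Method" }

func (f *fakeStream) SetHeader(md metadata.MD) error {
	if f.header == nil {
		f.header = metadata.MD{}
	}
	for k, v := range md {
		f.header[k] = append(f.header[k], v...)
	}
	return nil
}

func (f *fakeStream) SendHeader(md metadata.MD) error { return f.SetHeader(md) }
func (f *fakeStream) SetTrailer(md metadata.MD) error { return nil }

func TestHandlerSetsHeader(t *testing.T) {
	fs := &fakeStream{}
	ctx := grpc.NewContextWithServerTransportStream(context.Background(), fs)

	// Code under test: anything that calls grpc.SetHeader on the RPC context.
	if err := grpc.SetHeader(ctx, metadata.Pairs("x-request-id", "123")); err != nil {
		t.Fatal(err)
	}
	if got := fs.header["x-request-id"]; len(got) != 1 || got[0] != "123" {
		t.Fatalf("header not recorded: %v", fs.header)
	}
}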
+func (s *Server) GracefulStop() { + s.quitOnce.Do(func() { + close(s.quit) + }) + + defer func() { + s.doneOnce.Do(func() { + close(s.done) + }) + }() + + s.channelzRemoveOnce.Do(func() { + if channelz.IsOn() { + channelz.RemoveEntry(s.channelzID) + } + }) + s.mu.Lock() + if s.conns == nil { + s.mu.Unlock() + return + } + + for lis := range s.lis { + lis.Close() + } + s.lis = nil + if !s.drain { + for c := range s.conns { + c.(transport.ServerTransport).Drain() + } + s.drain = true + } + + // Wait for serving threads to be ready to exit. Only then can we be sure no + // new conns will be created. + s.mu.Unlock() + s.serveWG.Wait() + s.mu.Lock() + + for len(s.conns) != 0 { + s.cv.Wait() + } + s.conns = nil + if s.events != nil { + s.events.Finish() + s.events = nil + } + s.mu.Unlock() +} + +func init() { + internal.TestingUseHandlerImpl = func(arg interface{}) { + arg.(*Server).opts.useHandlerImpl = true + } +} + +// contentSubtype must be lowercase +// cannot return nil +func (s *Server) getCodec(contentSubtype string) baseCodec { + if s.opts.codec != nil { + return s.opts.codec + } + if contentSubtype == "" { + return encoding.GetCodec(proto.Name) + } + codec := encoding.GetCodec(contentSubtype) + if codec == nil { + return encoding.GetCodec(proto.Name) + } + return codec +} + +// SetHeader sets the header metadata. +// When called multiple times, all the provided metadata will be merged. +// All the metadata will be sent out when one of the following happens: +// - grpc.SendHeader() is called; +// - The first response is sent out; +// - An RPC status is sent out (error or success). +func SetHeader(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetHeader(md) +} + +// SendHeader sends header metadata. It may be called at most once. +// The provided md and headers set by SetHeader() will be sent. +func SendHeader(ctx context.Context, md metadata.MD) error { + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + if err := stream.SendHeader(md); err != nil { + return toRPCErr(err) + } + return nil +} + +// SetTrailer sets the trailer metadata that will be sent when an RPC returns. +// When called more than once, all the provided metadata will be merged. +func SetTrailer(ctx context.Context, md metadata.MD) error { + if md.Len() == 0 { + return nil + } + stream := ServerTransportStreamFromContext(ctx) + if stream == nil { + return status.Errorf(codes.Internal, "grpc: failed to fetch the stream from the context %v", ctx) + } + return stream.SetTrailer(md) +} + +// Method returns the method string for the server context. The returned +// string is in the format of "/service/method". +func Method(ctx context.Context) (string, bool) { + s := ServerTransportStreamFromContext(ctx) + if s == nil { + return "", false + } + return s.Method(), true +} diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go new file mode 100644 index 00000000000..015631d8d38 --- /dev/null +++ b/vendor/google.golang.org/grpc/service_config.go @@ -0,0 +1,233 @@ +/* + * + * Copyright 2017 gRPC authors. 
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "google.golang.org/grpc/grpclog" +) + +const maxInt = int(^uint(0) >> 1) + +// MethodConfig defines the configuration recommended by the service providers for a +// particular method. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type MethodConfig struct { + // WaitForReady indicates whether RPCs sent to this method should wait until + // the connection is ready by default (!failfast). The value specified via the + // gRPC client API will override the value set here. + WaitForReady *bool + // Timeout is the default timeout for RPCs sent to this method. The actual + // deadline used will be the minimum of the value specified here and the value + // set by the application via the gRPC client API. If either one is not set, + // then the other will be used. If neither is set, then the RPC has no deadline. + Timeout *time.Duration + // MaxReqSize is the maximum allowed payload size for an individual request in a + // stream (client->server) in bytes. The size which is measured is the serialized + // payload after per-message compression (but before stream compression) in bytes. + // The actual value used is the minimum of the value specified here and the value set + // by the application via the gRPC client API. If either one is not set, then the other + // will be used. If neither is set, then the built-in default is used. + MaxReqSize *int + // MaxRespSize is the maximum allowed payload size for an individual response in a + // stream (server->client) in bytes. + MaxRespSize *int +} + +// ServiceConfig is provided by the service provider and contains parameters for how +// clients that connect to the service should behave. +// +// Deprecated: Users should not use this struct. Service config should be received +// through name resolver, as specified here +// https://github.com/grpc/grpc/blob/master/doc/service_config.md +type ServiceConfig struct { + // LB is the load balancer the service providers recommends. The balancer specified + // via grpc.WithBalancer will override this. + LB *string + // Methods contains a map for the methods in this service. + // If there is an exact match for a method (i.e. /service/method) in the map, use the corresponding MethodConfig. + // If there's no exact match, look for the default config for the service (/service/) and use the corresponding MethodConfig if it exists. + // Otherwise, the method has no MethodConfig to use. 
+ Methods map[string]MethodConfig + + stickinessMetadataKey *string +} + +func parseDuration(s *string) (*time.Duration, error) { + if s == nil { + return nil, nil + } + if !strings.HasSuffix(*s, "s") { + return nil, fmt.Errorf("malformed duration %q", *s) + } + ss := strings.SplitN((*s)[:len(*s)-1], ".", 3) + if len(ss) > 2 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + // hasDigits is set if either the whole or fractional part of the number is + // present, since both are optional but one is required. + hasDigits := false + var d time.Duration + if len(ss[0]) > 0 { + i, err := strconv.ParseInt(ss[0], 10, 32) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + d = time.Duration(i) * time.Second + hasDigits = true + } + if len(ss) == 2 && len(ss[1]) > 0 { + if len(ss[1]) > 9 { + return nil, fmt.Errorf("malformed duration %q", *s) + } + f, err := strconv.ParseInt(ss[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("malformed duration %q: %v", *s, err) + } + for i := 9; i > len(ss[1]); i-- { + f *= 10 + } + d += time.Duration(f) + hasDigits = true + } + if !hasDigits { + return nil, fmt.Errorf("malformed duration %q", *s) + } + + return &d, nil +} + +type jsonName struct { + Service *string + Method *string +} + +func (j jsonName) generatePath() (string, bool) { + if j.Service == nil { + return "", false + } + res := "/" + *j.Service + "/" + if j.Method != nil { + res += *j.Method + } + return res, true +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. +type jsonMC struct { + Name *[]jsonName + WaitForReady *bool + Timeout *string + MaxRequestMessageBytes *int64 + MaxResponseMessageBytes *int64 +} + +// TODO(lyuxuan): delete this struct after cleaning up old service config implementation. 
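// Illustrative sketch, not part of the vendored grpc-go sources: a service
// config document in the JSON shape that parseServiceConfig below expects
// (field names follow the jsonSC / jsonMC / jsonName structs; json.Unmarshal
// matches them case-insensitively). The service and method names are
// placeholders.
const exampleServiceConfig = `{
  "loadBalancingPolicy": "round_robin",
  "methodConfig": [{
    "name": [
      {"service": "foo.Bar", "method": "Get"},
      {"service": "foo.Bar"}
    ],
    "waitForReady": true,
    "timeout": "1.5s",
    "maxRequestMessageBytes": 1048576,
    "maxResponseMessageBytes": 1048576
  }]
}`

// The first name entry configures /foo.Bar/Get; the second, with no method,
// becomes the /foo.Bar/ per-service default, matching generatePath above.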
+type jsonSC struct { + LoadBalancingPolicy *string + StickinessMetadataKey *string + MethodConfig *[]jsonMC +} + +func parseServiceConfig(js string) (ServiceConfig, error) { + var rsc jsonSC + err := json.Unmarshal([]byte(js), &rsc) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + sc := ServiceConfig{ + LB: rsc.LoadBalancingPolicy, + Methods: make(map[string]MethodConfig), + + stickinessMetadataKey: rsc.StickinessMetadataKey, + } + if rsc.MethodConfig == nil { + return sc, nil + } + + for _, m := range *rsc.MethodConfig { + if m.Name == nil { + continue + } + d, err := parseDuration(m.Timeout) + if err != nil { + grpclog.Warningf("grpc: parseServiceConfig error unmarshaling %s due to %v", js, err) + return ServiceConfig{}, err + } + + mc := MethodConfig{ + WaitForReady: m.WaitForReady, + Timeout: d, + } + if m.MaxRequestMessageBytes != nil { + if *m.MaxRequestMessageBytes > int64(maxInt) { + mc.MaxReqSize = newInt(maxInt) + } else { + mc.MaxReqSize = newInt(int(*m.MaxRequestMessageBytes)) + } + } + if m.MaxResponseMessageBytes != nil { + if *m.MaxResponseMessageBytes > int64(maxInt) { + mc.MaxRespSize = newInt(maxInt) + } else { + mc.MaxRespSize = newInt(int(*m.MaxResponseMessageBytes)) + } + } + for _, n := range *m.Name { + if path, valid := n.generatePath(); valid { + sc.Methods[path] = mc + } + } + } + + return sc, nil +} + +func min(a, b *int) *int { + if *a < *b { + return a + } + return b +} + +func getMaxSize(mcMax, doptMax *int, defaultVal int) *int { + if mcMax == nil && doptMax == nil { + return &defaultVal + } + if mcMax != nil && doptMax != nil { + return min(mcMax, doptMax) + } + if mcMax != nil { + return mcMax + } + return doptMax +} + +func newInt(b int) *int { + return &b +} diff --git a/vendor/google.golang.org/grpc/stats/handlers.go b/vendor/google.golang.org/grpc/stats/handlers.go new file mode 100644 index 00000000000..05b384c6931 --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/handlers.go @@ -0,0 +1,64 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package stats + +import ( + "net" + + "golang.org/x/net/context" +) + +// ConnTagInfo defines the relevant information needed by connection context tagger. +type ConnTagInfo struct { + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr +} + +// RPCTagInfo defines the relevant information needed by RPC context tagger. +type RPCTagInfo struct { + // FullMethodName is the RPC method in the format of /package.service/method. + FullMethodName string + // FailFast indicates if this RPC is failfast. + // This field is only valid on client side, it's always false on server side. + FailFast bool +} + +// Handler defines the interface for the related stats handling (e.g., RPCs, connections). 
+type Handler interface { + // TagRPC can attach some information to the given context. + // The context used for the rest lifetime of the RPC will be derived from + // the returned context. + TagRPC(context.Context, *RPCTagInfo) context.Context + // HandleRPC processes the RPC stats. + HandleRPC(context.Context, RPCStats) + + // TagConn can attach some information to the given context. + // The returned context will be used for stats handling. + // For conn stats handling, the context used in HandleConn for this + // connection will be derived from the context returned. + // For RPC stats handling, + // - On server side, the context used in HandleRPC for all RPCs on this + // connection will be derived from the context returned. + // - On client side, the context is not derived from the context returned. + TagConn(context.Context, *ConnTagInfo) context.Context + // HandleConn processes the Conn stats. + HandleConn(context.Context, ConnStats) +} diff --git a/vendor/google.golang.org/grpc/stats/stats.go b/vendor/google.golang.org/grpc/stats/stats.go new file mode 100644 index 00000000000..3f13190a0ac --- /dev/null +++ b/vendor/google.golang.org/grpc/stats/stats.go @@ -0,0 +1,296 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +//go:generate protoc --go_out=plugins=grpc:. grpc_testing/test.proto + +// Package stats is for collecting and reporting various network and RPC stats. +// This package is for monitoring purpose only. All fields are read-only. +// All APIs are experimental. +package stats // import "google.golang.org/grpc/stats" + +import ( + "net" + "time" + + "golang.org/x/net/context" +) + +// RPCStats contains stats information about RPCs. +type RPCStats interface { + isRPCStats() + // IsClient returns true if this RPCStats is from client side. + IsClient() bool +} + +// Begin contains stats when an RPC begins. +// FailFast is only valid if this Begin is from client side. +type Begin struct { + // Client is true if this Begin is from client side. + Client bool + // BeginTime is the time when the RPC begins. + BeginTime time.Time + // FailFast indicates if this RPC is failfast. + FailFast bool +} + +// IsClient indicates if the stats information is from client side. +func (s *Begin) IsClient() bool { return s.Client } + +func (s *Begin) isRPCStats() {} + +// InPayload contains the information for an incoming payload. +type InPayload struct { + // Client is true if this InPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // RecvTime is the time when the payload is received. + RecvTime time.Time +} + +// IsClient indicates if the stats information is from client side. 
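// Illustrative sketch, not part of the vendored grpc-go sources: a minimal
// stats.Handler that counts wire bytes, attached to a server with the
// StatsHandler option. The counter itself is an arbitrary choice.
package example

import (
	"log"
	"sync/atomic"

	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/stats"
)

type byteCounter struct {
	in, out int64
}

func (b *byteCounter) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context {
	return ctx
}

func (b *byteCounter) HandleRPC(_ context.Context, s stats.RPCStats) {
	switch p := s.(type) {
	case *stats.InPayload:
		atomic.AddInt64(&b.in, int64(p.WireLength))
	case *stats.OutPayload:
		atomic.AddInt64(&b.out, int64(p.WireLength))
	}
}

func (b *byteCounter) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context {
	return ctx
}

func (b *byteCounter) HandleConn(_ context.Context, s stats.ConnStats) {
	if _, ok := s.(*stats.ConnEnd); ok {
		log.Printf("conn closed; %d bytes in, %d bytes out so far",
			atomic.LoadInt64(&b.in), atomic.LoadInt64(&b.out))
	}
}

func newInstrumentedServer() *grpc.Server {
	return grpc.NewServer(grpc.StatsHandler(&byteCounter{}))
}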
+func (s *InPayload) IsClient() bool { return s.Client } + +func (s *InPayload) isRPCStats() {} + +// InHeader contains stats when a header is received. +type InHeader struct { + // Client is true if this InHeader is from client side. + Client bool + // WireLength is the wire length of header. + WireLength int + + // The following fields are valid only if Client is false. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if the stats information is from client side. +func (s *InHeader) IsClient() bool { return s.Client } + +func (s *InHeader) isRPCStats() {} + +// InTrailer contains stats when a trailer is received. +type InTrailer struct { + // Client is true if this InTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if the stats information is from client side. +func (s *InTrailer) IsClient() bool { return s.Client } + +func (s *InTrailer) isRPCStats() {} + +// OutPayload contains the information for an outgoing payload. +type OutPayload struct { + // Client is true if this OutPayload is from client side. + Client bool + // Payload is the payload with original type. + Payload interface{} + // Data is the serialized message payload. + Data []byte + // Length is the length of uncompressed data. + Length int + // WireLength is the length of data on wire (compressed, signed, encrypted). + WireLength int + // SentTime is the time when the payload is sent. + SentTime time.Time +} + +// IsClient indicates if this stats information is from client side. +func (s *OutPayload) IsClient() bool { return s.Client } + +func (s *OutPayload) isRPCStats() {} + +// OutHeader contains stats when a header is sent. +type OutHeader struct { + // Client is true if this OutHeader is from client side. + Client bool + + // The following fields are valid only if Client is true. + // FullMethod is the full RPC method string, i.e., /package.service/method. + FullMethod string + // RemoteAddr is the remote address of the corresponding connection. + RemoteAddr net.Addr + // LocalAddr is the local address of the corresponding connection. + LocalAddr net.Addr + // Compression is the compression algorithm used for the RPC. + Compression string +} + +// IsClient indicates if this stats information is from client side. +func (s *OutHeader) IsClient() bool { return s.Client } + +func (s *OutHeader) isRPCStats() {} + +// OutTrailer contains stats when a trailer is sent. +type OutTrailer struct { + // Client is true if this OutTrailer is from client side. + Client bool + // WireLength is the wire length of trailer. + WireLength int +} + +// IsClient indicates if this stats information is from client side. +func (s *OutTrailer) IsClient() bool { return s.Client } + +func (s *OutTrailer) isRPCStats() {} + +// End contains stats when an RPC ends. +type End struct { + // Client is true if this End is from client side. + Client bool + // BeginTime is the time when the RPC began. + BeginTime time.Time + // EndTime is the time when the RPC ends. + EndTime time.Time + // Error is the error the RPC ended with. 
It is an error generated from + // status.Status and can be converted back to status.Status using + // status.FromError if non-nil. + Error error +} + +// IsClient indicates if this is from client side. +func (s *End) IsClient() bool { return s.Client } + +func (s *End) isRPCStats() {} + +// ConnStats contains stats information about connections. +type ConnStats interface { + isConnStats() + // IsClient returns true if this ConnStats is from client side. + IsClient() bool +} + +// ConnBegin contains the stats of a connection when it is established. +type ConnBegin struct { + // Client is true if this ConnBegin is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnBegin) IsClient() bool { return s.Client } + +func (s *ConnBegin) isConnStats() {} + +// ConnEnd contains the stats of a connection when it ends. +type ConnEnd struct { + // Client is true if this ConnEnd is from client side. + Client bool +} + +// IsClient indicates if this is from client side. +func (s *ConnEnd) IsClient() bool { return s.Client } + +func (s *ConnEnd) isConnStats() {} + +type incomingTagsKey struct{} +type outgoingTagsKey struct{} + +// SetTags attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-tags-bin. Subsequent calls to +// SetTags will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTagsKey{}, b) +} + +// Tags returns the tags from the context for the inbound RPC. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Tags(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTagsKey{}).([]byte) + return b +} + +// SetIncomingTags attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). +// +// This is intended for gRPC-internal use ONLY. +func SetIncomingTags(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTagsKey{}, b) +} + +// OutgoingTags returns the tags from the context for the outbound RPC. +// +// This is intended for gRPC-internal use ONLY. +func OutgoingTags(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTagsKey{}).([]byte) + return b +} + +type incomingTraceKey struct{} +type outgoingTraceKey struct{} + +// SetTrace attaches stats tagging data to the context, which will be sent in +// the outgoing RPC with the header grpc-trace-bin. Subsequent calls to +// SetTrace will overwrite the values from earlier calls. +// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func SetTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, outgoingTraceKey{}, b) +} + +// Trace returns the trace from the context for the inbound RPC. 
+// +// NOTE: this is provided only for backward compatibility with existing clients +// and will likely be removed in an upcoming release. New uses should transmit +// this type of data using metadata with a different, non-reserved (i.e. does +// not begin with "grpc-") header name. +func Trace(ctx context.Context) []byte { + b, _ := ctx.Value(incomingTraceKey{}).([]byte) + return b +} + +// SetIncomingTrace attaches stats tagging data to the context, to be read by +// the application (not sent in outgoing RPCs). It is intended for +// gRPC-internal use. +func SetIncomingTrace(ctx context.Context, b []byte) context.Context { + return context.WithValue(ctx, incomingTraceKey{}, b) +} + +// OutgoingTrace returns the trace from the context for the outbound RPC. It is +// intended for gRPC-internal use. +func OutgoingTrace(ctx context.Context) []byte { + b, _ := ctx.Value(outgoingTraceKey{}).([]byte) + return b +} diff --git a/vendor/google.golang.org/grpc/status/status.go b/vendor/google.golang.org/grpc/status/status.go new file mode 100644 index 00000000000..9c61b094508 --- /dev/null +++ b/vendor/google.golang.org/grpc/status/status.go @@ -0,0 +1,189 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package status implements errors returned by gRPC. These errors are +// serialized and transmitted on the wire between server and client, and allow +// for additional data to be transmitted via the Details field in the status +// proto. gRPC service handlers should return an error created by this +// package, and gRPC clients should expect a corresponding error to be +// returned from the RPC call. +// +// This package upholds the invariants that a non-nil error may not +// contain an OK code, and an OK code must result in a nil error. +package status + +import ( + "errors" + "fmt" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" +) + +// statusError is an alias of a status proto. It implements error and Status, +// and a nil statusError should never be returned by this package. +type statusError spb.Status + +func (se *statusError) Error() string { + p := (*spb.Status)(se) + return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage()) +} + +func (se *statusError) GRPCStatus() *Status { + return &Status{s: (*spb.Status)(se)} +} + +// Status represents an RPC status code, message, and details. It is immutable +// and should be created with New, Newf, or FromProto. +type Status struct { + s *spb.Status +} + +// Code returns the status code contained in s. +func (s *Status) Code() codes.Code { + if s == nil || s.s == nil { + return codes.OK + } + return codes.Code(s.s.Code) +} + +// Message returns the message contained in s. 
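// Illustrative sketch, not part of the vendored grpc-go sources: producing and
// inspecting status errors as described above. The method and messages are
// placeholders.
package example

import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Server side: return an error built by this package so the client sees the
// intended code instead of codes.Unknown.
func lookup(id string) error {
	return status.Errorf(codes.NotFound, "no record for %q", id)
}

// Client side: recover the code and message from an RPC error. Convert is the
// shorthand for FromError when the ok flag is not needed.
func describe(err error) (codes.Code, string) {
	st := status.Convert(err)
	return st.Code(), st.Message()
}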
+func (s *Status) Message() string { + if s == nil || s.s == nil { + return "" + } + return s.s.Message +} + +// Proto returns s's status as an spb.Status proto message. +func (s *Status) Proto() *spb.Status { + if s == nil { + return nil + } + return proto.Clone(s.s).(*spb.Status) +} + +// Err returns an immutable error representing s; returns nil if s.Code() is +// OK. +func (s *Status) Err() error { + if s.Code() == codes.OK { + return nil + } + return (*statusError)(s.s) +} + +// New returns a Status representing c and msg. +func New(c codes.Code, msg string) *Status { + return &Status{s: &spb.Status{Code: int32(c), Message: msg}} +} + +// Newf returns New(c, fmt.Sprintf(format, a...)). +func Newf(c codes.Code, format string, a ...interface{}) *Status { + return New(c, fmt.Sprintf(format, a...)) +} + +// Error returns an error representing c and msg. If c is OK, returns nil. +func Error(c codes.Code, msg string) error { + return New(c, msg).Err() +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return Error(c, fmt.Sprintf(format, a...)) +} + +// ErrorProto returns an error representing s. If s.Code is OK, returns nil. +func ErrorProto(s *spb.Status) error { + return FromProto(s).Err() +} + +// FromProto returns a Status representing s. +func FromProto(s *spb.Status) *Status { + return &Status{s: proto.Clone(s).(*spb.Status)} +} + +// FromError returns a Status representing err if it was produced from this +// package or has a method `GRPCStatus() *Status`. Otherwise, ok is false and a +// Status is returned with codes.Unknown and the original error message. +func FromError(err error) (s *Status, ok bool) { + if err == nil { + return &Status{s: &spb.Status{Code: int32(codes.OK)}}, true + } + if se, ok := err.(interface{ GRPCStatus() *Status }); ok { + return se.GRPCStatus(), true + } + return New(codes.Unknown, err.Error()), false +} + +// Convert is a convenience function which removes the need to handle the +// boolean return value from FromError. +func Convert(err error) *Status { + s, _ := FromError(err) + return s +} + +// WithDetails returns a new status with the provided details messages appended to the status. +// If any errors are encountered, it returns nil and the first error encountered. +func (s *Status) WithDetails(details ...proto.Message) (*Status, error) { + if s.Code() == codes.OK { + return nil, errors.New("no error details for status with code OK") + } + // s.Code() != OK implies that s.Proto() != nil. + p := s.Proto() + for _, detail := range details { + any, err := ptypes.MarshalAny(detail) + if err != nil { + return nil, err + } + p.Details = append(p.Details, any) + } + return &Status{s: p}, nil +} + +// Details returns a slice of details messages attached to the status. +// If a detail cannot be decoded, the error is returned in place of the detail. +func (s *Status) Details() []interface{} { + if s == nil || s.s == nil { + return nil + } + details := make([]interface{}, 0, len(s.s.Details)) + for _, any := range s.s.Details { + detail := &ptypes.DynamicAny{} + if err := ptypes.UnmarshalAny(any, detail); err != nil { + details = append(details, err) + continue + } + details = append(details, detail.Message) + } + return details +} + +// Code returns the Code of the error if it is a Status error, codes.OK if err +// is nil, or codes.Unknown otherwise. +func Code(err error) codes.Code { + // Don't use FromError to avoid allocation of OK status. 
+ if err == nil { + return codes.OK + } + if se, ok := err.(interface{ GRPCStatus() *Status }); ok { + return se.GRPCStatus().Code() + } + return codes.Unknown +} diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go new file mode 100644 index 00000000000..82921a15a3a --- /dev/null +++ b/vendor/google.golang.org/grpc/stream.go @@ -0,0 +1,765 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "errors" + "io" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/trace" + "google.golang.org/grpc/balancer" + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/encoding" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/transport" +) + +// StreamHandler defines the handler called by gRPC server to complete the +// execution of a streaming RPC. If a StreamHandler returns an error, it +// should be produced by the status package, or else gRPC will use +// codes.Unknown as the status code and err.Error() as the status message +// of the RPC. +type StreamHandler func(srv interface{}, stream ServerStream) error + +// StreamDesc represents a streaming RPC service's method specification. +type StreamDesc struct { + StreamName string + Handler StreamHandler + + // At least one of these is true. + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +// +// All errors returned from Stream are compatible with the status package. +type Stream interface { + // Context returns the context for this stream. + Context() context.Context + // SendMsg blocks until it sends m, the stream is done or the stream + // breaks. + // On error, it aborts the stream and returns an RPC status on client + // side. On server side, it simply returns the error to the caller. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message or the stream is + // done. On client side, it returns io.EOF when the stream is done. On + // any other error, it aborts the stream and returns an RPC status. On + // server side, it simply returns the error to the caller. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientStream defines the interface a client stream has to satisfy. 
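// Illustrative sketch, not part of the vendored grpc-go sources: the
// send/receive pattern the Stream comments above allow -- one goroutine
// sending, one receiving, and never two goroutines calling SendMsg at the
// same time. The generated bidi-streaming client (pb.ChatClient, pb.Msg) is
// hypothetical.
package example

import (
	"io"

	"golang.org/x/net/context"

	pb "example.com/chat/proto" // hypothetical generated package
)

func chat(ctx context.Context, client pb.ChatClient, outgoing []*pb.Msg) error {
	stream, err := client.Chat(ctx) // hypothetical generated bidi stream method
	if err != nil {
		return err
	}

	// Single sender goroutine; CloseSend tells the server we are done writing.
	sendErr := make(chan error, 1)
	go func() {
		for _, m := range outgoing {
			if err := stream.SendMsg(m); err != nil {
				sendErr <- err
				return
			}
		}
		sendErr <- stream.CloseSend()
	}()

	// Single receiver; always drain the stream so the final RPC status is seen.
	for {
		in := new(pb.Msg)
		if err := stream.RecvMsg(in); err != nil {
			if err == io.EOF {
				break // server closed the stream cleanly
			}
			return err
		}
		// use in ...
	}
	return <-sendErr
}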
+type ClientStream interface { + // Header returns the header metadata received from the server if there + // is any. It blocks if the metadata is not ready to read. + Header() (metadata.MD, error) + // Trailer returns the trailer metadata from the server, if there is any. + // It must only be called after stream.CloseAndRecv has returned, or + // stream.Recv has returned a non-nil error (including io.EOF). + Trailer() metadata.MD + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. + CloseSend() error + // Stream.SendMsg() may return a non-nil error when something wrong happens sending + // the request. The returned error indicates the status of this sending, not the final + // status of the RPC. + // + // Always call Stream.RecvMsg() to drain the stream and get the final + // status, otherwise there could be leaked resources. + Stream +} + +// NewStream creates a new Stream for the client side. This is typically +// called by generated code. +func (cc *ClientConn) NewStream(ctx context.Context, desc *StreamDesc, method string, opts ...CallOption) (ClientStream, error) { + // allow interceptor to see all applicable call options, which means those + // configured as defaults from dial option as well as per-call options + opts = combine(cc.dopts.callOptions, opts) + + if cc.dopts.streamInt != nil { + return cc.dopts.streamInt(ctx, desc, cc, method, newClientStream, opts...) + } + return newClientStream(ctx, desc, cc, method, opts...) +} + +// NewClientStream creates a new Stream for the client side. This is typically +// called by generated code. +// +// DEPRECATED: Use ClientConn.NewStream instead. +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (ClientStream, error) { + return cc.NewStream(ctx, desc, method, opts...) +} + +func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) { + if channelz.IsOn() { + cc.incrCallsStarted() + defer func() { + if err != nil { + cc.incrCallsFailed() + } + }() + } + c := defaultCallInfo() + mc := cc.GetMethodConfig(method) + if mc.WaitForReady != nil { + c.failFast = !*mc.WaitForReady + } + + // Possible context leak: + // The cancel function for the child context we create will only be called + // when RecvMsg returns a non-nil error, if the ClientConn is closed, or if + // an error is generated by SendMsg. + // https://github.com/grpc/grpc-go/issues/1818. + var cancel context.CancelFunc + if mc.Timeout != nil && *mc.Timeout >= 0 { + ctx, cancel = context.WithTimeout(ctx, *mc.Timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer func() { + if err != nil { + cancel() + } + }() + + for _, o := range opts { + if err := o.before(c); err != nil { + return nil, toRPCErr(err) + } + } + c.maxSendMessageSize = getMaxSize(mc.MaxReqSize, c.maxSendMessageSize, defaultClientMaxSendMessageSize) + c.maxReceiveMessageSize = getMaxSize(mc.MaxRespSize, c.maxReceiveMessageSize, defaultClientMaxReceiveMessageSize) + if err := setCallInfoCodec(c); err != nil { + return nil, err + } + + callHdr := &transport.CallHdr{ + Host: cc.authority, + Method: method, + // If it's not client streaming, we should already have the request to be sent, + // so we don't flush the header. + // If it's client streaming, the user may never send a request or send it any + // time soon, so we ask the transport to flush the header. 
+ Flush: desc.ClientStreams, + ContentSubtype: c.contentSubtype, + } + + // Set our outgoing compression according to the UseCompressor CallOption, if + // set. In that case, also find the compressor from the encoding package. + // Otherwise, use the compressor configured by the WithCompressor DialOption, + // if set. + var cp Compressor + var comp encoding.Compressor + if ct := c.compressorType; ct != "" { + callHdr.SendCompress = ct + if ct != encoding.Identity { + comp = encoding.GetCompressor(ct) + if comp == nil { + return nil, status.Errorf(codes.Internal, "grpc: Compressor is not installed for requested grpc-encoding %q", ct) + } + } + } else if cc.dopts.cp != nil { + callHdr.SendCompress = cc.dopts.cp.Type() + cp = cc.dopts.cp + } + if c.creds != nil { + callHdr.Creds = c.creds + } + var trInfo traceInfo + if EnableTracing { + trInfo.tr = trace.New("grpc.Sent."+methodFamily(method), method) + trInfo.firstLine.client = true + if deadline, ok := ctx.Deadline(); ok { + trInfo.firstLine.deadline = deadline.Sub(time.Now()) + } + trInfo.tr.LazyLog(&trInfo.firstLine, false) + ctx = trace.NewContext(ctx, trInfo.tr) + defer func() { + if err != nil { + // Need to call tr.finish() if error is returned. + // Because tr will not be returned to caller. + trInfo.tr.LazyPrintf("RPC: [%v]", err) + trInfo.tr.SetError() + trInfo.tr.Finish() + } + }() + } + ctx = newContextWithRPCInfo(ctx, c.failFast) + sh := cc.dopts.copts.StatsHandler + var beginTime time.Time + if sh != nil { + ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: c.failFast}) + beginTime = time.Now() + begin := &stats.Begin{ + Client: true, + BeginTime: beginTime, + FailFast: c.failFast, + } + sh.HandleRPC(ctx, begin) + defer func() { + if err != nil { + // Only handle end stats if err != nil. + end := &stats.End{ + Client: true, + Error: err, + BeginTime: beginTime, + EndTime: time.Now(), + } + sh.HandleRPC(ctx, end) + } + }() + } + + var ( + t transport.ClientTransport + s *transport.Stream + done func(balancer.DoneInfo) + ) + for { + // Check to make sure the context has expired. This will prevent us from + // looping forever if an error occurs for wait-for-ready RPCs where no data + // is sent on the wire. + select { + case <-ctx.Done(): + return nil, toRPCErr(ctx.Err()) + default: + } + + t, done, err = cc.getTransport(ctx, c.failFast) + if err != nil { + return nil, err + } + + s, err = t.NewStream(ctx, callHdr) + if err != nil { + if done != nil { + done(balancer.DoneInfo{Err: err}) + done = nil + } + // In the event of any error from NewStream, we never attempted to write + // anything to the wire, so we can retry indefinitely for non-fail-fast + // RPCs. + if !c.failFast { + continue + } + return nil, toRPCErr(err) + } + break + } + + cs := &clientStream{ + opts: opts, + c: c, + cc: cc, + desc: desc, + codec: c.codec, + cp: cp, + comp: comp, + cancel: cancel, + attempt: &csAttempt{ + t: t, + s: s, + p: &parser{r: s}, + done: done, + dc: cc.dopts.dc, + ctx: ctx, + trInfo: trInfo, + statsHandler: sh, + beginTime: beginTime, + }, + } + cs.c.stream = cs + cs.attempt.cs = cs + if desc != unaryStreamDesc { + // Listen on cc and stream contexts to cleanup when the user closes the + // ClientConn or cancels the stream context. In all other cases, an error + // should already be injected into the recv buffer by the transport, which + // the client will eventually receive, and then we will cancel the stream's + // context in clientStream.finish. 
+ go func() { + select { + case <-cc.ctx.Done(): + cs.finish(ErrClientConnClosing) + case <-ctx.Done(): + cs.finish(toRPCErr(ctx.Err())) + } + }() + } + return cs, nil +} + +// clientStream implements a client side Stream. +type clientStream struct { + opts []CallOption + c *callInfo + cc *ClientConn + desc *StreamDesc + + codec baseCodec + cp Compressor + comp encoding.Compressor + + cancel context.CancelFunc // cancels all attempts + + sentLast bool // sent an end stream + + mu sync.Mutex // guards finished + finished bool // TODO: replace with atomic cmpxchg or sync.Once? + + attempt *csAttempt // the active client stream attempt + // TODO(hedging): hedging will have multiple attempts simultaneously. +} + +// csAttempt implements a single transport stream attempt within a +// clientStream. +type csAttempt struct { + cs *clientStream + t transport.ClientTransport + s *transport.Stream + p *parser + done func(balancer.DoneInfo) + + dc Decompressor + decomp encoding.Compressor + decompSet bool + + ctx context.Context // the application's context, wrapped by stats/tracing + + mu sync.Mutex // guards trInfo.tr + // trInfo.tr is set when created (if EnableTracing is true), + // and cleared when the finish method is called. + trInfo traceInfo + + statsHandler stats.Handler + beginTime time.Time +} + +func (cs *clientStream) Context() context.Context { + // TODO(retry): commit the current attempt (the context has peer-aware data). + return cs.attempt.context() +} + +func (cs *clientStream) Header() (metadata.MD, error) { + m, err := cs.attempt.header() + if err != nil { + // TODO(retry): maybe retry on error or commit attempt on success. + err = toRPCErr(err) + cs.finish(err) + } + return m, err +} + +func (cs *clientStream) Trailer() metadata.MD { + // TODO(retry): on error, maybe retry (trailers-only). + return cs.attempt.trailer() +} + +func (cs *clientStream) SendMsg(m interface{}) (err error) { + // TODO(retry): buffer message for replaying if not committed. + return cs.attempt.sendMsg(m) +} + +func (cs *clientStream) RecvMsg(m interface{}) (err error) { + // TODO(retry): maybe retry on error or commit attempt on success. + return cs.attempt.recvMsg(m) +} + +func (cs *clientStream) CloseSend() error { + cs.attempt.closeSend() + return nil +} + +func (cs *clientStream) finish(err error) { + if err == io.EOF { + // Ending a stream with EOF indicates a success. + err = nil + } + cs.mu.Lock() + if cs.finished { + cs.mu.Unlock() + return + } + cs.finished = true + cs.mu.Unlock() + if channelz.IsOn() { + if err != nil { + cs.cc.incrCallsFailed() + } else { + cs.cc.incrCallsSucceeded() + } + } + // TODO(retry): commit current attempt if necessary. + cs.attempt.finish(err) + for _, o := range cs.opts { + o.after(cs.c) + } + cs.cancel() +} + +func (a *csAttempt) context() context.Context { + return a.s.Context() +} + +func (a *csAttempt) header() (metadata.MD, error) { + return a.s.Header() +} + +func (a *csAttempt) trailer() metadata.MD { + return a.s.Trailer() +} + +func (a *csAttempt) sendMsg(m interface{}) (err error) { + // TODO Investigate how to signal the stats handling party. + // generate error stats if err != nil && err != io.EOF? + cs := a.cs + defer func() { + // For non-client-streaming RPCs, we return nil instead of EOF on success + // because the generated code requires it. finish is not called; RecvMsg() + // will call it with the stream's status independently. 
+ if err == io.EOF && !cs.desc.ClientStreams { + err = nil + } + if err != nil && err != io.EOF { + // Call finish on the client stream for errors generated by this SendMsg + // call, as these indicate problems created by this client. (Transport + // errors are converted to an io.EOF error below; the real error will be + // returned from RecvMsg eventually in that case, or be retried.) + cs.finish(err) + } + }() + // TODO: Check cs.sentLast and error if we already ended the stream. + if EnableTracing { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } + a.mu.Unlock() + } + var outPayload *stats.OutPayload + if a.statsHandler != nil { + outPayload = &stats.OutPayload{ + Client: true, + } + } + hdr, data, err := encode(cs.codec, m, cs.cp, outPayload, cs.comp) + if err != nil { + return err + } + if len(data) > *cs.c.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), *cs.c.maxSendMessageSize) + } + if !cs.desc.ClientStreams { + cs.sentLast = true + } + err = a.t.Write(a.s, hdr, data, &transport.Options{Last: !cs.desc.ClientStreams}) + if err == nil { + if outPayload != nil { + outPayload.SentTime = time.Now() + a.statsHandler.HandleRPC(a.ctx, outPayload) + } + if channelz.IsOn() { + a.t.IncrMsgSent() + } + return nil + } + return io.EOF +} + +func (a *csAttempt) recvMsg(m interface{}) (err error) { + cs := a.cs + defer func() { + if err != nil || !cs.desc.ServerStreams { + // err != nil or non-server-streaming indicates end of stream. + cs.finish(err) + } + }() + var inPayload *stats.InPayload + if a.statsHandler != nil { + inPayload = &stats.InPayload{ + Client: true, + } + } + if !a.decompSet { + // Block until we receive headers containing received message encoding. + if ct := a.s.RecvCompress(); ct != "" && ct != encoding.Identity { + if a.dc == nil || a.dc.Type() != ct { + // No configured decompressor, or it does not match the incoming + // message encoding; attempt to find a registered compressor that does. + a.dc = nil + a.decomp = encoding.GetCompressor(ct) + } + } else { + // No compression is used; disable our decompressor. + a.dc = nil + } + // Only initialize this state once per stream. + a.decompSet = true + } + err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, inPayload, a.decomp) + if err != nil { + if err == io.EOF { + if statusErr := a.s.Status().Err(); statusErr != nil { + return statusErr + } + return io.EOF // indicates successful end of stream. + } + return toRPCErr(err) + } + if EnableTracing { + a.mu.Lock() + if a.trInfo.tr != nil { + a.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } + a.mu.Unlock() + } + if inPayload != nil { + a.statsHandler.HandleRPC(a.ctx, inPayload) + } + if channelz.IsOn() { + a.t.IncrMsgRecv() + } + if cs.desc.ServerStreams { + // Subsequent messages should be received by subsequent RecvMsg calls. + return nil + } + + // Special handling for non-server-stream rpcs. + // This recv expects EOF or errors, so we don't collect inPayload. 
+	err = recv(a.p, cs.codec, a.s, a.dc, m, *cs.c.maxReceiveMessageSize, nil, a.decomp)
+	if err == nil {
+		return toRPCErr(errors.New("grpc: client streaming protocol violation: get <nil>, want <EOF>"))
+	}
+	if err == io.EOF {
+		return a.s.Status().Err() // non-server streaming Recv returns nil on success
+	}
+	return toRPCErr(err)
+}
+
+func (a *csAttempt) closeSend() {
+	cs := a.cs
+	if cs.sentLast {
+		return
+	}
+	cs.sentLast = true
+	cs.attempt.t.Write(cs.attempt.s, nil, nil, &transport.Options{Last: true})
+	// We ignore errors from Write. Any error it would return would also be
+	// returned by a subsequent RecvMsg call, and the user is supposed to always
+	// finish the stream by calling RecvMsg until it returns err != nil.
+}
+
+func (a *csAttempt) finish(err error) {
+	a.mu.Lock()
+	a.t.CloseStream(a.s, err)
+
+	if a.done != nil {
+		a.done(balancer.DoneInfo{
+			Err:           err,
+			BytesSent:     true,
+			BytesReceived: a.s.BytesReceived(),
+		})
+	}
+	if a.statsHandler != nil {
+		end := &stats.End{
+			Client:    true,
+			BeginTime: a.beginTime,
+			EndTime:   time.Now(),
+			Error:     err,
+		}
+		a.statsHandler.HandleRPC(a.ctx, end)
+	}
+	if a.trInfo.tr != nil {
+		if err == nil {
+			a.trInfo.tr.LazyPrintf("RPC: [OK]")
+		} else {
+			a.trInfo.tr.LazyPrintf("RPC: [%v]", err)
+			a.trInfo.tr.SetError()
+		}
+		a.trInfo.tr.Finish()
+		a.trInfo.tr = nil
+	}
+	a.mu.Unlock()
+}
+
+// ServerStream defines the interface a server stream has to satisfy.
+type ServerStream interface {
+	// SetHeader sets the header metadata. It may be called multiple times.
+	// When called multiple times, all the provided metadata will be merged.
+	// All the metadata will be sent out when one of the following happens:
+	//  - ServerStream.SendHeader() is called;
+	//  - The first response is sent out;
+	//  - An RPC status is sent out (error or success).
+	SetHeader(metadata.MD) error
+	// SendHeader sends the header metadata.
+	// The provided md and headers set by SetHeader() will be sent.
+	// It fails if called multiple times.
+	SendHeader(metadata.MD) error
+	// SetTrailer sets the trailer metadata which will be sent with the RPC status.
+	// When called more than once, all the provided metadata will be merged.
+	SetTrailer(metadata.MD)
+	Stream
+}
+
+// serverStream implements a server side Stream.
+type serverStream struct {
+	ctx   context.Context
+	t     transport.ServerTransport
+	s     *transport.Stream
+	p     *parser
+	codec baseCodec
+
+	cp     Compressor
+	dc     Decompressor
+	comp   encoding.Compressor
+	decomp encoding.Compressor
+
+	maxReceiveMessageSize int
+	maxSendMessageSize    int
+	trInfo                *traceInfo
+
+	statsHandler stats.Handler
+
+	mu sync.Mutex // protects trInfo.tr after the service handler runs.
+} + +func (ss *serverStream) Context() context.Context { + return ss.ctx +} + +func (ss *serverStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + return ss.s.SetHeader(md) +} + +func (ss *serverStream) SendHeader(md metadata.MD) error { + return ss.t.WriteHeader(ss.s, md) +} + +func (ss *serverStream) SetTrailer(md metadata.MD) { + if md.Len() == 0 { + return + } + ss.s.SetTrailer(md) +} + +func (ss *serverStream) SendMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: true, msg: m}, true) + } else { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgSent() + } + }() + var outPayload *stats.OutPayload + if ss.statsHandler != nil { + outPayload = &stats.OutPayload{} + } + hdr, data, err := encode(ss.codec, m, ss.cp, outPayload, ss.comp) + if err != nil { + return err + } + if len(data) > ss.maxSendMessageSize { + return status.Errorf(codes.ResourceExhausted, "trying to send message larger than max (%d vs. %d)", len(data), ss.maxSendMessageSize) + } + if err := ss.t.Write(ss.s, hdr, data, &transport.Options{Last: false}); err != nil { + return toRPCErr(err) + } + if outPayload != nil { + outPayload.SentTime = time.Now() + ss.statsHandler.HandleRPC(ss.s.Context(), outPayload) + } + return nil +} + +func (ss *serverStream) RecvMsg(m interface{}) (err error) { + defer func() { + if ss.trInfo != nil { + ss.mu.Lock() + if ss.trInfo.tr != nil { + if err == nil { + ss.trInfo.tr.LazyLog(&payload{sent: false, msg: m}, true) + } else if err != io.EOF { + ss.trInfo.tr.LazyLog(&fmtStringer{"%v", []interface{}{err}}, true) + ss.trInfo.tr.SetError() + } + } + ss.mu.Unlock() + } + if err != nil && err != io.EOF { + st, _ := status.FromError(toRPCErr(err)) + ss.t.WriteStatus(ss.s, st) + } + if channelz.IsOn() && err == nil { + ss.t.IncrMsgRecv() + } + }() + var inPayload *stats.InPayload + if ss.statsHandler != nil { + inPayload = &stats.InPayload{} + } + if err := recv(ss.p, ss.codec, ss.s, ss.dc, m, ss.maxReceiveMessageSize, inPayload, ss.decomp); err != nil { + if err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + return toRPCErr(err) + } + if inPayload != nil { + ss.statsHandler.HandleRPC(ss.s.Context(), inPayload) + } + return nil +} + +// MethodFromServerStream returns the method string for the input stream. +// The returned string is in the format of "/service/method". +func MethodFromServerStream(stream ServerStream) (string, bool) { + return Method(stream.Context()) +} diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go new file mode 100644 index 00000000000..22b8fb50dea --- /dev/null +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -0,0 +1,51 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package tap defines the function handles which are executed on the transport +// layer of gRPC-Go and related information. Everything here is EXPERIMENTAL. +package tap + +import ( + "golang.org/x/net/context" +) + +// Info defines the relevant information needed by the handles. +type Info struct { + // FullMethodName is the string of grpc method (in the format of + // /package.service/method). + FullMethodName string + // TODO: More to be added. +} + +// ServerInHandle defines the function which runs before a new stream is created +// on the server side. If it returns a non-nil error, the stream will not be +// created and a RST_STREAM will be sent back to the client with REFUSED_STREAM. +// The client will receive an RPC error "code = Unavailable, desc = stream +// terminated by RST_STREAM with error code: REFUSED_STREAM". +// +// It's intended to be used in situations where you don't want to waste the +// resources to accept the new stream (e.g. rate-limiting). And the content of +// the error will be ignored and won't be sent back to the client. For other +// general usages, please use interceptors. +// +// Note that it is executed in the per-connection I/O goroutine(s) instead of +// per-RPC goroutine. Therefore, users should NOT have any +// blocking/time-consuming work in this handle. Otherwise all the RPCs would +// slow down. Also, for the same reason, this handle won't be called +// concurrently by gRPC. +type ServerInHandle func(ctx context.Context, info *Info) (context.Context, error) diff --git a/vendor/google.golang.org/grpc/trace.go b/vendor/google.golang.org/grpc/trace.go new file mode 100644 index 00000000000..c1c96dedcb7 --- /dev/null +++ b/vendor/google.golang.org/grpc/trace.go @@ -0,0 +1,113 @@ +/* + * + * Copyright 2015 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package grpc + +import ( + "bytes" + "fmt" + "io" + "net" + "strings" + "time" + + "golang.org/x/net/trace" +) + +// EnableTracing controls whether to trace RPCs using the golang.org/x/net/trace package. +// This should only be set before any RPCs are sent or received by this program. +var EnableTracing bool + +// methodFamily returns the trace family for the given method. +// It turns "/pkg.Service/GetFoo" into "pkg.Service". 
+func methodFamily(m string) string { + m = strings.TrimPrefix(m, "/") // remove leading slash + if i := strings.Index(m, "/"); i >= 0 { + m = m[:i] // remove everything from second slash + } + if i := strings.LastIndex(m, "."); i >= 0 { + m = m[i+1:] // cut down to last dotted component + } + return m +} + +// traceInfo contains tracing information for an RPC. +type traceInfo struct { + tr trace.Trace + firstLine firstLine +} + +// firstLine is the first line of an RPC trace. +type firstLine struct { + client bool // whether this is a client (outgoing) RPC + remoteAddr net.Addr + deadline time.Duration // may be zero +} + +func (f *firstLine) String() string { + var line bytes.Buffer + io.WriteString(&line, "RPC: ") + if f.client { + io.WriteString(&line, "to") + } else { + io.WriteString(&line, "from") + } + fmt.Fprintf(&line, " %v deadline:", f.remoteAddr) + if f.deadline != 0 { + fmt.Fprint(&line, f.deadline) + } else { + io.WriteString(&line, "none") + } + return line.String() +} + +const truncateSize = 100 + +func truncate(x string, l int) string { + if l > len(x) { + return x + } + return x[:l] +} + +// payload represents an RPC request or response payload. +type payload struct { + sent bool // whether this is an outgoing payload + msg interface{} // e.g. a proto.Message + // TODO(dsymonds): add stringifying info to codec, and limit how much we hold here? +} + +func (p payload) String() string { + if p.sent { + return truncate(fmt.Sprintf("sent: %v", p.msg), truncateSize) + } + return truncate(fmt.Sprintf("recv: %v", p.msg), truncateSize) +} + +type fmtStringer struct { + format string + a []interface{} +} + +func (f *fmtStringer) String() string { + return fmt.Sprintf(f.format, f.a...) +} + +type stringer string + +func (s stringer) String() string { return string(s) } diff --git a/vendor/google.golang.org/grpc/transport/bdp_estimator.go b/vendor/google.golang.org/grpc/transport/bdp_estimator.go new file mode 100644 index 00000000000..63cd2627c87 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/bdp_estimator.go @@ -0,0 +1,140 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "sync" + "time" +) + +const ( + // bdpLimit is the maximum value the flow control windows + // will be increased to. + bdpLimit = (1 << 20) * 4 + // alpha is a constant factor used to keep a moving average + // of RTTs. + alpha = 0.9 + // If the current bdp sample is greater than or equal to + // our beta * our estimated bdp and the current bandwidth + // sample is the maximum bandwidth observed so far, we + // increase our bbp estimate by a factor of gamma. + beta = 0.66 + // To put our bdp to be smaller than or equal to twice the real BDP, + // we should multiply our current sample with 4/3, however to round things out + // we use 2 as the multiplication factor. + gamma = 2 +) + +// Adding arbitrary data to ping so that its ack can be identified. +// Easter-egg: what does the ping message say? 
+var bdpPing = &ping{data: [8]byte{2, 4, 16, 16, 9, 14, 7, 7}} + +type bdpEstimator struct { + // sentAt is the time when the ping was sent. + sentAt time.Time + + mu sync.Mutex + // bdp is the current bdp estimate. + bdp uint32 + // sample is the number of bytes received in one measurement cycle. + sample uint32 + // bwMax is the maximum bandwidth noted so far (bytes/sec). + bwMax float64 + // bool to keep track of the beginning of a new measurement cycle. + isSent bool + // Callback to update the window sizes. + updateFlowControl func(n uint32) + // sampleCount is the number of samples taken so far. + sampleCount uint64 + // round trip time (seconds) + rtt float64 +} + +// timesnap registers the time bdp ping was sent out so that +// network rtt can be calculated when its ack is received. +// It is called (by controller) when the bdpPing is +// being written on the wire. +func (b *bdpEstimator) timesnap(d [8]byte) { + if bdpPing.data != d { + return + } + b.sentAt = time.Now() +} + +// add adds bytes to the current sample for calculating bdp. +// It returns true only if a ping must be sent. This can be used +// by the caller (handleData) to make decision about batching +// a window update with it. +func (b *bdpEstimator) add(n uint32) bool { + b.mu.Lock() + defer b.mu.Unlock() + if b.bdp == bdpLimit { + return false + } + if !b.isSent { + b.isSent = true + b.sample = n + b.sentAt = time.Time{} + b.sampleCount++ + return true + } + b.sample += n + return false +} + +// calculate is called when an ack for a bdp ping is received. +// Here we calculate the current bdp and bandwidth sample and +// decide if the flow control windows should go up. +func (b *bdpEstimator) calculate(d [8]byte) { + // Check if the ping acked for was the bdp ping. + if bdpPing.data != d { + return + } + b.mu.Lock() + rttSample := time.Since(b.sentAt).Seconds() + if b.sampleCount < 10 { + // Bootstrap rtt with an average of first 10 rtt samples. + b.rtt += (rttSample - b.rtt) / float64(b.sampleCount) + } else { + // Heed to the recent past more. + b.rtt += (rttSample - b.rtt) * float64(alpha) + } + b.isSent = false + // The number of bytes accumulated so far in the sample is smaller + // than or equal to 1.5 times the real BDP on a saturated connection. + bwCurrent := float64(b.sample) / (b.rtt * float64(1.5)) + if bwCurrent > b.bwMax { + b.bwMax = bwCurrent + } + // If the current sample (which is smaller than or equal to the 1.5 times the real BDP) is + // greater than or equal to 2/3rd our perceived bdp AND this is the maximum bandwidth seen so far, we + // should update our perception of the network BDP. + if float64(b.sample) >= beta*float64(b.bdp) && bwCurrent == b.bwMax && b.bdp != bdpLimit { + sampleFloat := float64(b.sample) + b.bdp = uint32(gamma * sampleFloat) + if b.bdp > bdpLimit { + b.bdp = bdpLimit + } + bdp := b.bdp + b.mu.Unlock() + b.updateFlowControl(bdp) + return + } + b.mu.Unlock() +} diff --git a/vendor/google.golang.org/grpc/transport/controlbuf.go b/vendor/google.golang.org/grpc/transport/controlbuf.go new file mode 100644 index 00000000000..e147cd51bf1 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/controlbuf.go @@ -0,0 +1,769 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "fmt" + "runtime" + "sync" + + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" +) + +type itemNode struct { + it interface{} + next *itemNode +} + +type itemList struct { + head *itemNode + tail *itemNode +} + +func (il *itemList) enqueue(i interface{}) { + n := &itemNode{it: i} + if il.tail == nil { + il.head, il.tail = n, n + return + } + il.tail.next = n + il.tail = n +} + +// peek returns the first item in the list without removing it from the +// list. +func (il *itemList) peek() interface{} { + return il.head.it +} + +func (il *itemList) dequeue() interface{} { + if il.head == nil { + return nil + } + i := il.head.it + il.head = il.head.next + if il.head == nil { + il.tail = nil + } + return i +} + +func (il *itemList) dequeueAll() *itemNode { + h := il.head + il.head, il.tail = nil, nil + return h +} + +func (il *itemList) isEmpty() bool { + return il.head == nil +} + +// The following defines various control items which could flow through +// the control buffer of transport. They represent different aspects of +// control tasks, e.g., flow control, settings, streaming resetting, etc. + +type headerFrame struct { + streamID uint32 + hf []hpack.HeaderField + endStream bool // Valid on server side. + initStream func(uint32) (bool, error) // Used only on the client side. + onWrite func() + wq *writeQuota // write quota for the stream created. + cleanup *cleanupStream // Valid on the server side. + onOrphaned func(error) // Valid on client-side +} + +type cleanupStream struct { + streamID uint32 + idPtr *uint32 + rst bool + rstCode http2.ErrCode + onWrite func() +} + +type dataFrame struct { + streamID uint32 + endStream bool + h []byte + d []byte + // onEachWrite is called every time + // a part of d is written out. + onEachWrite func() +} + +type incomingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +type outgoingWindowUpdate struct { + streamID uint32 + increment uint32 +} + +type incomingSettings struct { + ss []http2.Setting +} + +type outgoingSettings struct { + ss []http2.Setting +} + +type settingsAck struct { +} + +type incomingGoAway struct { +} + +type goAway struct { + code http2.ErrCode + debugData []byte + headsUp bool + closeConn bool +} + +type ping struct { + ack bool + data [8]byte +} + +type outFlowControlSizeRequest struct { + resp chan uint32 +} + +type outStreamState int + +const ( + active outStreamState = iota + empty + waitingOnStreamQuota +) + +type outStream struct { + id uint32 + state outStreamState + itl *itemList + bytesOutStanding int + wq *writeQuota + + next *outStream + prev *outStream +} + +func (s *outStream) deleteSelf() { + if s.prev != nil { + s.prev.next = s.next + } + if s.next != nil { + s.next.prev = s.prev + } + s.next, s.prev = nil, nil +} + +type outStreamList struct { + // Following are sentinel objects that mark the + // beginning and end of the list. They do not + // contain any item lists. All valid objects are + // inserted in between them. 
+ // This is needed so that an outStream object can + // deleteSelf() in O(1) time without knowing which + // list it belongs to. + head *outStream + tail *outStream +} + +func newOutStreamList() *outStreamList { + head, tail := new(outStream), new(outStream) + head.next = tail + tail.prev = head + return &outStreamList{ + head: head, + tail: tail, + } +} + +func (l *outStreamList) enqueue(s *outStream) { + e := l.tail.prev + e.next = s + s.prev = e + s.next = l.tail + l.tail.prev = s +} + +// remove from the beginning of the list. +func (l *outStreamList) dequeue() *outStream { + b := l.head.next + if b == l.tail { + return nil + } + b.deleteSelf() + return b +} + +type controlBuffer struct { + ch chan struct{} + done <-chan struct{} + mu sync.Mutex + consumerWaiting bool + list *itemList + err error +} + +func newControlBuffer(done <-chan struct{}) *controlBuffer { + return &controlBuffer{ + ch: make(chan struct{}, 1), + list: &itemList{}, + done: done, + } +} + +func (c *controlBuffer) put(it interface{}) error { + _, err := c.executeAndPut(nil, it) + return err +} + +func (c *controlBuffer) executeAndPut(f func(it interface{}) bool, it interface{}) (bool, error) { + var wakeUp bool + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return false, c.err + } + if f != nil { + if !f(it) { // f wasn't successful + c.mu.Unlock() + return false, nil + } + } + if c.consumerWaiting { + wakeUp = true + c.consumerWaiting = false + } + c.list.enqueue(it) + c.mu.Unlock() + if wakeUp { + select { + case c.ch <- struct{}{}: + default: + } + } + return true, nil +} + +func (c *controlBuffer) get(block bool) (interface{}, error) { + for { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return nil, c.err + } + if !c.list.isEmpty() { + h := c.list.dequeue() + c.mu.Unlock() + return h, nil + } + if !block { + c.mu.Unlock() + return nil, nil + } + c.consumerWaiting = true + c.mu.Unlock() + select { + case <-c.ch: + case <-c.done: + c.finish() + return nil, ErrConnClosing + } + } +} + +func (c *controlBuffer) finish() { + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return + } + c.err = ErrConnClosing + // There may be headers for streams in the control buffer. + // These streams need to be cleaned out since the transport + // is still not aware of these yet. + for head := c.list.dequeueAll(); head != nil; head = head.next { + hdr, ok := head.it.(*headerFrame) + if !ok { + continue + } + if hdr.onOrphaned != nil { // It will be nil on the server-side. + hdr.onOrphaned(ErrConnClosing) + } + } + c.mu.Unlock() +} + +type side int + +const ( + clientSide side = iota + serverSide +) + +type loopyWriter struct { + side side + cbuf *controlBuffer + sendQuota uint32 + oiws uint32 // outbound initial window size. + estdStreams map[uint32]*outStream // Established streams. + activeStreams *outStreamList // Streams that are sending data. + framer *framer + hBuf *bytes.Buffer // The buffer for HPACK encoding. + hEnc *hpack.Encoder // HPACK encoder. 
+ bdpEst *bdpEstimator + draining bool + + // Side-specific handlers + ssGoAwayHandler func(*goAway) (bool, error) +} + +func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator) *loopyWriter { + var buf bytes.Buffer + l := &loopyWriter{ + side: s, + cbuf: cbuf, + sendQuota: defaultWindowSize, + oiws: defaultWindowSize, + estdStreams: make(map[uint32]*outStream), + activeStreams: newOutStreamList(), + framer: fr, + hBuf: &buf, + hEnc: hpack.NewEncoder(&buf), + bdpEst: bdpEst, + } + return l +} + +const minBatchSize = 1000 + +// run should be run in a separate goroutine. +func (l *loopyWriter) run() { + var ( + it interface{} + err error + isEmpty bool + ) + defer func() { + errorf("transport: loopyWriter.run returning. Err: %v", err) + }() + for { + it, err = l.cbuf.get(true) + if err != nil { + return + } + if err = l.handle(it); err != nil { + return + } + if _, err = l.processData(); err != nil { + return + } + gosched := true + hasdata: + for { + it, err = l.cbuf.get(false) + if err != nil { + return + } + if it != nil { + if err = l.handle(it); err != nil { + return + } + if _, err = l.processData(); err != nil { + return + } + continue hasdata + } + if isEmpty, err = l.processData(); err != nil { + return + } + if !isEmpty { + continue hasdata + } + if gosched { + gosched = false + if l.framer.writer.offset < minBatchSize { + runtime.Gosched() + continue hasdata + } + } + l.framer.writer.Flush() + break hasdata + + } + } +} + +func (l *loopyWriter) outgoingWindowUpdateHandler(w *outgoingWindowUpdate) error { + return l.framer.fr.WriteWindowUpdate(w.streamID, w.increment) +} + +func (l *loopyWriter) incomingWindowUpdateHandler(w *incomingWindowUpdate) error { + // Otherwise update the quota. + if w.streamID == 0 { + l.sendQuota += w.increment + return nil + } + // Find the stream and update it. + if str, ok := l.estdStreams[w.streamID]; ok { + str.bytesOutStanding -= int(w.increment) + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota > 0 && str.state == waitingOnStreamQuota { + str.state = active + l.activeStreams.enqueue(str) + return nil + } + } + return nil +} + +func (l *loopyWriter) outgoingSettingsHandler(s *outgoingSettings) error { + return l.framer.fr.WriteSettings(s.ss...) +} + +func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error { + if err := l.applySettings(s.ss); err != nil { + return err + } + return l.framer.fr.WriteSettingsAck() +} + +func (l *loopyWriter) headerHandler(h *headerFrame) error { + if l.side == serverSide { + if h.endStream { // Case 1.A: Server wants to close stream. + // Make sure it's not a trailers only response. + if str, ok := l.estdStreams[h.streamID]; ok { + if str.state != empty { // either active or waiting on stream quota. + // add it str's list of items. + str.itl.enqueue(h) + return nil + } + } + if err := l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite); err != nil { + return err + } + return l.cleanupStreamHandler(h.cleanup) + } + // Case 1.B: Server is responding back with headers. + str := &outStream{ + state: empty, + itl: &itemList{}, + wq: h.wq, + } + l.estdStreams[h.streamID] = str + return l.writeHeader(h.streamID, h.endStream, h.hf, h.onWrite) + } + // Case 2: Client wants to originate stream. 
+ str := &outStream{ + id: h.streamID, + state: empty, + itl: &itemList{}, + wq: h.wq, + } + str.itl.enqueue(h) + return l.originateStream(str) +} + +func (l *loopyWriter) originateStream(str *outStream) error { + hdr := str.itl.dequeue().(*headerFrame) + sendPing, err := hdr.initStream(str.id) + if err != nil { + if err == ErrConnClosing { + return err + } + // Other errors(errStreamDrain) need not close transport. + return nil + } + if err = l.writeHeader(str.id, hdr.endStream, hdr.hf, hdr.onWrite); err != nil { + return err + } + l.estdStreams[str.id] = str + if sendPing { + return l.pingHandler(&ping{data: [8]byte{}}) + } + return nil +} + +func (l *loopyWriter) writeHeader(streamID uint32, endStream bool, hf []hpack.HeaderField, onWrite func()) error { + if onWrite != nil { + onWrite() + } + l.hBuf.Reset() + for _, f := range hf { + if err := l.hEnc.WriteField(f); err != nil { + warningf("transport: loopyWriter.writeHeader encountered error while encoding headers:", err) + } + } + var ( + err error + endHeaders, first bool + ) + first = true + for !endHeaders { + size := l.hBuf.Len() + if size > http2MaxFrameLen { + size = http2MaxFrameLen + } else { + endHeaders = true + } + if first { + first = false + err = l.framer.fr.WriteHeaders(http2.HeadersFrameParam{ + StreamID: streamID, + BlockFragment: l.hBuf.Next(size), + EndStream: endStream, + EndHeaders: endHeaders, + }) + } else { + err = l.framer.fr.WriteContinuation( + streamID, + endHeaders, + l.hBuf.Next(size), + ) + } + if err != nil { + return err + } + } + return nil +} + +func (l *loopyWriter) preprocessData(df *dataFrame) error { + str, ok := l.estdStreams[df.streamID] + if !ok { + return nil + } + // If we got data for a stream it means that + // stream was originated and the headers were sent out. + str.itl.enqueue(df) + if str.state == empty { + str.state = active + l.activeStreams.enqueue(str) + } + return nil +} + +func (l *loopyWriter) pingHandler(p *ping) error { + if !p.ack { + l.bdpEst.timesnap(p.data) + } + return l.framer.fr.WritePing(p.ack, p.data) + +} + +func (l *loopyWriter) outFlowControlSizeRequestHandler(o *outFlowControlSizeRequest) error { + o.resp <- l.sendQuota + return nil +} + +func (l *loopyWriter) cleanupStreamHandler(c *cleanupStream) error { + c.onWrite() + if str, ok := l.estdStreams[c.streamID]; ok { + // On the server side it could be a trailers-only response or + // a RST_STREAM before stream initialization thus the stream might + // not be established yet. + delete(l.estdStreams, c.streamID) + str.deleteSelf() + } + if c.rst { // If RST_STREAM needs to be sent. + if err := l.framer.fr.WriteRSTStream(c.streamID, c.rstCode); err != nil { + return err + } + } + if l.side == clientSide && l.draining && len(l.estdStreams) == 0 { + return ErrConnClosing + } + return nil +} + +func (l *loopyWriter) incomingGoAwayHandler(*incomingGoAway) error { + if l.side == clientSide { + l.draining = true + if len(l.estdStreams) == 0 { + return ErrConnClosing + } + } + return nil +} + +func (l *loopyWriter) goAwayHandler(g *goAway) error { + // Handling of outgoing GoAway is very specific to side. 
+ if l.ssGoAwayHandler != nil { + draining, err := l.ssGoAwayHandler(g) + if err != nil { + return err + } + l.draining = draining + } + return nil +} + +func (l *loopyWriter) handle(i interface{}) error { + switch i := i.(type) { + case *incomingWindowUpdate: + return l.incomingWindowUpdateHandler(i) + case *outgoingWindowUpdate: + return l.outgoingWindowUpdateHandler(i) + case *incomingSettings: + return l.incomingSettingsHandler(i) + case *outgoingSettings: + return l.outgoingSettingsHandler(i) + case *headerFrame: + return l.headerHandler(i) + case *cleanupStream: + return l.cleanupStreamHandler(i) + case *incomingGoAway: + return l.incomingGoAwayHandler(i) + case *dataFrame: + return l.preprocessData(i) + case *ping: + return l.pingHandler(i) + case *goAway: + return l.goAwayHandler(i) + case *outFlowControlSizeRequest: + return l.outFlowControlSizeRequestHandler(i) + default: + return fmt.Errorf("transport: unknown control message type %T", i) + } +} + +func (l *loopyWriter) applySettings(ss []http2.Setting) error { + for _, s := range ss { + switch s.ID { + case http2.SettingInitialWindowSize: + o := l.oiws + l.oiws = s.Val + if o < l.oiws { + // If the new limit is greater make all depleted streams active. + for _, stream := range l.estdStreams { + if stream.state == waitingOnStreamQuota { + stream.state = active + l.activeStreams.enqueue(stream) + } + } + } + } + } + return nil +} + +func (l *loopyWriter) processData() (bool, error) { + if l.sendQuota == 0 { + return true, nil + } + str := l.activeStreams.dequeue() + if str == nil { + return true, nil + } + dataItem := str.itl.peek().(*dataFrame) + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { + // Client sends out empty data frame with endStream = true + if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil { + return false, err + } + str.itl.dequeue() + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, nil + } + } else { + l.activeStreams.enqueue(str) + } + return false, nil + } + var ( + idx int + buf []byte + ) + if len(dataItem.h) != 0 { // data header has not been written out yet. + buf = dataItem.h + } else { + idx = 1 + buf = dataItem.d + } + size := http2MaxFrameLen + if len(buf) < size { + size = len(buf) + } + if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { + str.state = waitingOnStreamQuota + return false, nil + } else if strQuota < size { + size = strQuota + } + + if l.sendQuota < uint32(size) { + size = int(l.sendQuota) + } + // Now that outgoing flow controls are checked we can replenish str's write quota + str.wq.replenish(size) + var endStream bool + // This last data message on this stream and all + // of it can be written in this go. + if dataItem.endStream && size == len(buf) { + // buf contains either data or it contains header but data is empty. 
+ if idx == 1 || len(dataItem.d) == 0 { + endStream = true + } + } + if dataItem.onEachWrite != nil { + dataItem.onEachWrite() + } + if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil { + return false, err + } + buf = buf[size:] + str.bytesOutStanding += size + l.sendQuota -= uint32(size) + if idx == 0 { + dataItem.h = buf + } else { + dataItem.d = buf + } + + if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out. + str.itl.dequeue() + } + if str.itl.isEmpty() { + str.state = empty + } else if trailer, ok := str.itl.peek().(*headerFrame); ok { // The next item is trailers. + if err := l.writeHeader(trailer.streamID, trailer.endStream, trailer.hf, trailer.onWrite); err != nil { + return false, err + } + if err := l.cleanupStreamHandler(trailer.cleanup); err != nil { + return false, err + } + } else if int(l.oiws)-str.bytesOutStanding <= 0 { // Ran out of stream quota. + str.state = waitingOnStreamQuota + } else { // Otherwise add it back to the list of active streams. + l.activeStreams.enqueue(str) + } + return false, nil +} diff --git a/vendor/google.golang.org/grpc/transport/flowcontrol.go b/vendor/google.golang.org/grpc/transport/flowcontrol.go new file mode 100644 index 00000000000..378f5c4502c --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/flowcontrol.go @@ -0,0 +1,236 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "fmt" + "math" + "sync" + "sync/atomic" + "time" +) + +const ( + // The default value of flow control window size in HTTP2 spec. + defaultWindowSize = 65535 + // The initial window size for flow control. + initialWindowSize = defaultWindowSize // for an RPC + infinity = time.Duration(math.MaxInt64) + defaultClientKeepaliveTime = infinity + defaultClientKeepaliveTimeout = 20 * time.Second + defaultMaxStreamsClient = 100 + defaultMaxConnectionIdle = infinity + defaultMaxConnectionAge = infinity + defaultMaxConnectionAgeGrace = infinity + defaultServerKeepaliveTime = 2 * time.Hour + defaultServerKeepaliveTimeout = 20 * time.Second + defaultKeepalivePolicyMinTime = 5 * time.Minute + // max window limit set by HTTP2 Specs. + maxWindowSize = math.MaxInt32 + // defaultWriteQuota is the default value for number of data + // bytes that each stream can schedule before some of it being + // flushed out. + defaultWriteQuota = 64 * 1024 +) + +// writeQuota is a soft limit on the amount of data a stream can +// schedule before some of it is written out. +type writeQuota struct { + quota int32 + // get waits on read from when quota goes less than or equal to zero. + // replenish writes on it when quota goes positive again. + ch chan struct{} + // done is triggered in error case. 
+ done <-chan struct{} +} + +func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota { + return &writeQuota{ + quota: sz, + ch: make(chan struct{}, 1), + done: done, + } +} + +func (w *writeQuota) get(sz int32) error { + for { + if atomic.LoadInt32(&w.quota) > 0 { + atomic.AddInt32(&w.quota, -sz) + return nil + } + select { + case <-w.ch: + continue + case <-w.done: + return errStreamDone + } + } +} + +func (w *writeQuota) replenish(n int) { + sz := int32(n) + a := atomic.AddInt32(&w.quota, sz) + b := a - sz + if b <= 0 && a > 0 { + select { + case w.ch <- struct{}{}: + default: + } + } +} + +type trInFlow struct { + limit uint32 + unacked uint32 + effectiveWindowSize uint32 +} + +func (f *trInFlow) newLimit(n uint32) uint32 { + d := n - f.limit + f.limit = n + f.updateEffectiveWindowSize() + return d +} + +func (f *trInFlow) onData(n uint32) uint32 { + f.unacked += n + if f.unacked >= f.limit/4 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w + } + f.updateEffectiveWindowSize() + return 0 +} + +func (f *trInFlow) reset() uint32 { + w := f.unacked + f.unacked = 0 + f.updateEffectiveWindowSize() + return w +} + +func (f *trInFlow) updateEffectiveWindowSize() { + atomic.StoreUint32(&f.effectiveWindowSize, f.limit-f.unacked) +} + +func (f *trInFlow) getSize() uint32 { + return atomic.LoadUint32(&f.effectiveWindowSize) +} + +// TODO(mmukhi): Simplify this code. +// inFlow deals with inbound flow control +type inFlow struct { + mu sync.Mutex + // The inbound flow control limit for pending data. + limit uint32 + // pendingData is the overall data which have been received but not been + // consumed by applications. + pendingData uint32 + // The amount of data the application has consumed but grpc has not sent + // window update for them. Used to reduce window update frequency. + pendingUpdate uint32 + // delta is the extra window update given by receiver when an application + // is reading data bigger in size than the inFlow limit. + delta uint32 +} + +// newLimit updates the inflow window to a new value n. +// It assumes that n is always greater than the old limit. +func (f *inFlow) newLimit(n uint32) uint32 { + f.mu.Lock() + d := n - f.limit + f.limit = n + f.mu.Unlock() + return d +} + +func (f *inFlow) maybeAdjust(n uint32) uint32 { + if n > uint32(math.MaxInt32) { + n = uint32(math.MaxInt32) + } + f.mu.Lock() + // estSenderQuota is the receiver's view of the maximum number of bytes the sender + // can send without a window update. + estSenderQuota := int32(f.limit - (f.pendingData + f.pendingUpdate)) + // estUntransmittedData is the maximum number of bytes the sends might not have put + // on the wire yet. A value of 0 or less means that we have already received all or + // more bytes than the application is requesting to read. + estUntransmittedData := int32(n - f.pendingData) // Casting into int32 since it could be negative. + // This implies that unless we send a window update, the sender won't be able to send all the bytes + // for this message. Therefore we must send an update over the limit since there's an active read + // request from the application. + if estUntransmittedData > estSenderQuota { + // Sender's window shouldn't go more than 2^31 - 1 as specified in the HTTP spec. + if f.limit+n > maxWindowSize { + f.delta = maxWindowSize - f.limit + } else { + // Send a window update for the whole message and not just the difference between + // estUntransmittedData and estSenderQuota. 
This will be helpful in case the message + // is padded; We will fallback on the current available window(at least a 1/4th of the limit). + f.delta = n + } + f.mu.Unlock() + return f.delta + } + f.mu.Unlock() + return 0 +} + +// onData is invoked when some data frame is received. It updates pendingData. +func (f *inFlow) onData(n uint32) error { + f.mu.Lock() + f.pendingData += n + if f.pendingData+f.pendingUpdate > f.limit+f.delta { + limit := f.limit + rcvd := f.pendingData + f.pendingUpdate + f.mu.Unlock() + return fmt.Errorf("received %d-bytes data exceeding the limit %d bytes", rcvd, limit) + } + f.mu.Unlock() + return nil +} + +// onRead is invoked when the application reads the data. It returns the window size +// to be sent to the peer. +func (f *inFlow) onRead(n uint32) uint32 { + f.mu.Lock() + if f.pendingData == 0 { + f.mu.Unlock() + return 0 + } + f.pendingData -= n + if n > f.delta { + n -= f.delta + f.delta = 0 + } else { + f.delta -= n + n = 0 + } + f.pendingUpdate += n + if f.pendingUpdate >= f.limit/4 { + wu := f.pendingUpdate + f.pendingUpdate = 0 + f.mu.Unlock() + return wu + } + f.mu.Unlock() + return 0 +} diff --git a/vendor/google.golang.org/grpc/transport/go16.go b/vendor/google.golang.org/grpc/transport/go16.go new file mode 100644 index 00000000000..5babcf9b877 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go16.go @@ -0,0 +1,51 @@ +// +build go1.6,!go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "net" + "net/http" + + "google.golang.org/grpc/codes" + + "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{Cancel: ctx.Done()}).Dial(network, address) +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + +// contextFromRequest returns a background context. +func contextFromRequest(r *http.Request) context.Context { + return context.Background() +} diff --git a/vendor/google.golang.org/grpc/transport/go17.go b/vendor/google.golang.org/grpc/transport/go17.go new file mode 100644 index 00000000000..b7fa6bdb9ca --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/go17.go @@ -0,0 +1,52 @@ +// +build go1.7 + +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "context" + "net" + "net/http" + + "google.golang.org/grpc/codes" + + netctx "golang.org/x/net/context" +) + +// dialContext connects to the address on the named network. +func dialContext(ctx context.Context, network, address string) (net.Conn, error) { + return (&net.Dialer{}).DialContext(ctx, network, address) +} + +// ContextErr converts the error from context package into a StreamError. +func ContextErr(err error) StreamError { + switch err { + case context.DeadlineExceeded, netctx.DeadlineExceeded: + return streamErrorf(codes.DeadlineExceeded, "%v", err) + case context.Canceled, netctx.Canceled: + return streamErrorf(codes.Canceled, "%v", err) + } + return streamErrorf(codes.Internal, "Unexpected error from context packet: %v", err) +} + +// contextFromRequest returns a context from the HTTP Request. +func contextFromRequest(r *http.Request) context.Context { + return r.Context() +} diff --git a/vendor/google.golang.org/grpc/transport/handler_server.go b/vendor/google.golang.org/grpc/transport/handler_server.go new file mode 100644 index 00000000000..f71b7482174 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/handler_server.go @@ -0,0 +1,451 @@ +/* + * + * Copyright 2016 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file is the implementation of a gRPC server using HTTP/2 which +// uses the standard Go http2 Server implementation (via the +// http.Handler interface), rather than speaking low-level HTTP/2 +// frames itself. It is the implementation of *grpc.Server.ServeHTTP. + +package transport + +import ( + "errors" + "fmt" + "io" + "net" + "net/http" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// NewServerHandlerTransport returns a ServerTransport handling gRPC +// from inside an http.Handler. It requires that the http Server +// supports HTTP/2. +func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler) (ServerTransport, error) { + if r.ProtoMajor != 2 { + return nil, errors.New("gRPC requires HTTP/2") + } + if r.Method != "POST" { + return nil, errors.New("invalid gRPC request method") + } + contentType := r.Header.Get("Content-Type") + // TODO: do we assume contentType is lowercase? 
we did before + contentSubtype, validContentType := contentSubtype(contentType) + if !validContentType { + return nil, errors.New("invalid gRPC request content-type") + } + if _, ok := w.(http.Flusher); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.Flusher") + } + if _, ok := w.(http.CloseNotifier); !ok { + return nil, errors.New("gRPC requires a ResponseWriter supporting http.CloseNotifier") + } + + st := &serverHandlerTransport{ + rw: w, + req: r, + closedCh: make(chan struct{}), + writes: make(chan func()), + contentType: contentType, + contentSubtype: contentSubtype, + stats: stats, + } + + if v := r.Header.Get("grpc-timeout"); v != "" { + to, err := decodeTimeout(v) + if err != nil { + return nil, streamErrorf(codes.Internal, "malformed time-out: %v", err) + } + st.timeoutSet = true + st.timeout = to + } + + metakv := []string{"content-type", contentType} + if r.Host != "" { + metakv = append(metakv, ":authority", r.Host) + } + for k, vv := range r.Header { + k = strings.ToLower(k) + if isReservedHeader(k) && !isWhitelistedHeader(k) { + continue + } + for _, v := range vv { + v, err := decodeMetadataHeader(k, v) + if err != nil { + return nil, streamErrorf(codes.Internal, "malformed binary metadata: %v", err) + } + metakv = append(metakv, k, v) + } + } + st.headerMD = metadata.Pairs(metakv...) + + return st, nil +} + +// serverHandlerTransport is an implementation of ServerTransport +// which replies to exactly one gRPC request (exactly one HTTP request), +// using the net/http.Handler interface. This http.Handler is guaranteed +// at this point to be speaking over HTTP/2, so it's able to speak valid +// gRPC. +type serverHandlerTransport struct { + rw http.ResponseWriter + req *http.Request + timeoutSet bool + timeout time.Duration + didCommonHeaders bool + + headerMD metadata.MD + + closeOnce sync.Once + closedCh chan struct{} // closed on Close + + // writes is a channel of code to run serialized in the + // ServeHTTP (HandleStreams) goroutine. The channel is closed + // when WriteStatus is called. + writes chan func() + + // block concurrent WriteStatus calls + // e.g. grpc/(*serverStream).SendMsg/RecvMsg + writeStatusMu sync.Mutex + + // we just mirror the request content-type + contentType string + // we store both contentType and contentSubtype so we don't keep recreating them + // TODO make sure this is consistent across handler_server and http2_server + contentSubtype string + + stats stats.Handler +} + +func (ht *serverHandlerTransport) Close() error { + ht.closeOnce.Do(ht.closeCloseChanOnce) + return nil +} + +func (ht *serverHandlerTransport) closeCloseChanOnce() { close(ht.closedCh) } + +func (ht *serverHandlerTransport) RemoteAddr() net.Addr { return strAddr(ht.req.RemoteAddr) } + +// strAddr is a net.Addr backed by either a TCP "ip:port" string, or +// the empty string if unknown. +type strAddr string + +func (a strAddr) Network() string { + if a != "" { + // Per the documentation on net/http.Request.RemoteAddr, if this is + // set, it's set to the IP:port of the peer (hence, TCP): + // https://golang.org/pkg/net/http/#Request + // + // If we want to support Unix sockets later, we can + // add our own grpc-specific convention within the + // grpc codebase to set RemoteAddr to a different + // format, or probably better: we can attach it to the + // context and use that from serverHandlerTransport.RemoteAddr. 
+ return "tcp" + } + return "" +} + +func (a strAddr) String() string { return string(a) } + +// do runs fn in the ServeHTTP goroutine. +func (ht *serverHandlerTransport) do(fn func()) error { + // Avoid a panic writing to closed channel. Imperfect but maybe good enough. + select { + case <-ht.closedCh: + return ErrConnClosing + default: + select { + case ht.writes <- fn: + return nil + case <-ht.closedCh: + return ErrConnClosing + } + } +} + +func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) error { + ht.writeStatusMu.Lock() + defer ht.writeStatusMu.Unlock() + + err := ht.do(func() { + ht.writeCommonHeaders(s) + + // And flush, in case no header or body has been sent yet. + // This forces a separation of headers and trailers if this is the + // first call (for example, in end2end tests's TestNoService). + ht.rw.(http.Flusher).Flush() + + h := ht.rw.Header() + h.Set("Grpc-Status", fmt.Sprintf("%d", st.Code())) + if m := st.Message(); m != "" { + h.Set("Grpc-Message", encodeGrpcMessage(m)) + } + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + } + + if md := s.Trailer(); len(md) > 0 { + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + // http2 ResponseWriter mechanism to send undeclared Trailers after + // the headers have possibly been written. + h.Add(http2.TrailerPrefix+k, encodeMetadataHeader(k, v)) + } + } + } + }) + + if err == nil { // transport has not been closed + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + } + ht.Close() + close(ht.writes) + } + return err +} + +// writeCommonHeaders sets common headers on the first write +// call (Write, WriteHeader, or WriteStatus). +func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { + if ht.didCommonHeaders { + return + } + ht.didCommonHeaders = true + + h := ht.rw.Header() + h["Date"] = nil // suppress Date to make tests happy; TODO: restore + h.Set("Content-Type", ht.contentType) + + // Predeclare trailers we'll set later in WriteStatus (after the body). + // This is a SHOULD in the HTTP RFC, and the way you add (known) + // Trailers per the net/http.ResponseWriter contract. + // See https://golang.org/pkg/net/http/#ResponseWriter + // and https://golang.org/pkg/net/http/#example_ResponseWriter_trailers + h.Add("Trailer", "Grpc-Status") + h.Add("Trailer", "Grpc-Message") + h.Add("Trailer", "Grpc-Status-Details-Bin") + + if s.sendCompress != "" { + h.Set("Grpc-Encoding", s.sendCompress) + } +} + +func (ht *serverHandlerTransport) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + return ht.do(func() { + ht.writeCommonHeaders(s) + ht.rw.Write(hdr) + ht.rw.Write(data) + if !opts.Delay { + ht.rw.(http.Flusher).Flush() + } + }) +} + +func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { + err := ht.do(func() { + ht.writeCommonHeaders(s) + h := ht.rw.Header() + for k, vv := range md { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. 
+ if isReservedHeader(k) { + continue + } + for _, v := range vv { + v = encodeMetadataHeader(k, v) + h.Add(k, v) + } + } + ht.rw.WriteHeader(200) + ht.rw.(http.Flusher).Flush() + }) + + if err == nil { + if ht.stats != nil { + ht.stats.HandleRPC(s.Context(), &stats.OutHeader{}) + } + } + return err +} + +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { + // With this transport type there will be exactly 1 stream: this HTTP request. + + ctx := contextFromRequest(ht.req) + var cancel context.CancelFunc + if ht.timeoutSet { + ctx, cancel = context.WithTimeout(ctx, ht.timeout) + } else { + ctx, cancel = context.WithCancel(ctx) + } + + // requestOver is closed when either the request's context is done + // or the status has been written via WriteStatus. + requestOver := make(chan struct{}) + + // clientGone receives a single value if peer is gone, either + // because the underlying connection is dead or because the + // peer sends an http2 RST_STREAM. + clientGone := ht.rw.(http.CloseNotifier).CloseNotify() + go func() { + select { + case <-requestOver: + return + case <-ht.closedCh: + case <-clientGone: + } + cancel() + }() + + req := ht.req + + s := &Stream{ + id: 0, // irrelevant + requestRead: func(int) {}, + cancel: cancel, + buf: newRecvBuffer(), + st: ht, + method: req.URL.Path, + recvCompress: req.Header.Get("grpc-encoding"), + contentSubtype: ht.contentSubtype, + } + pr := &peer.Peer{ + Addr: ht.RemoteAddr(), + } + if req.TLS != nil { + pr.AuthInfo = credentials.TLSInfo{State: *req.TLS} + } + ctx = metadata.NewIncomingContext(ctx, ht.headerMD) + s.ctx = peer.NewContext(ctx, pr) + if ht.stats != nil { + s.ctx = ht.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: ht.RemoteAddr(), + Compression: s.recvCompress, + } + ht.stats.HandleRPC(s.ctx, inHeader) + } + s.trReader = &transportReader{ + reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf}, + windowHandler: func(int) {}, + } + + // readerDone is closed when the Body.Read-ing goroutine exits. + readerDone := make(chan struct{}) + go func() { + defer close(readerDone) + + // TODO: minimize garbage, optimize recvBuffer code/ownership + const readSize = 8196 + for buf := make([]byte, readSize); ; { + n, err := req.Body.Read(buf) + if n > 0 { + s.buf.put(recvMsg{data: buf[:n:n]}) + buf = buf[n:] + } + if err != nil { + s.buf.put(recvMsg{err: mapRecvMsgError(err)}) + return + } + if len(buf) == 0 { + buf = make([]byte, readSize) + } + } + }() + + // startStream is provided by the *grpc.Server's serveStreams. + // It starts a goroutine serving s and exits immediately. + // The goroutine that is started is the one that then calls + // into ht, calling WriteHeader, Write, WriteStatus, Close, etc. + startStream(s) + + ht.runStream() + close(requestOver) + + // Wait for reading goroutine to finish. + req.Body.Close() + <-readerDone +} + +func (ht *serverHandlerTransport) runStream() { + for { + select { + case fn, ok := <-ht.writes: + if !ok { + return + } + fn() + case <-ht.closedCh: + return + } + } +} + +func (ht *serverHandlerTransport) IncrMsgSent() {} + +func (ht *serverHandlerTransport) IncrMsgRecv() {} + +func (ht *serverHandlerTransport) Drain() { + panic("Drain() is not implemented") +} + +// mapRecvMsgError returns the non-nil err into the appropriate +// error value as expected by callers of *grpc.parser.recvMsg. 
+// In particular, in can only be: +// * io.EOF +// * io.ErrUnexpectedEOF +// * of type transport.ConnectionError +// * of type transport.StreamError +func mapRecvMsgError(err error) error { + if err == io.EOF || err == io.ErrUnexpectedEOF { + return err + } + if se, ok := err.(http2.StreamError); ok { + if code, ok := http2ErrConvTab[se.Code]; ok { + return StreamError{ + Code: code, + Desc: se.Error(), + } + } + } + return connectionErrorf(true, err, err.Error()) +} diff --git a/vendor/google.golang.org/grpc/transport/http2_client.go b/vendor/google.golang.org/grpc/transport/http2_client.go new file mode 100644 index 00000000000..1fdabd954ef --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http2_client.go @@ -0,0 +1,1284 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "io" + "math" + "net" + "strings" + "sync" + "sync/atomic" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" +) + +// http2Client implements the ClientTransport interface with HTTP2. +type http2Client struct { + ctx context.Context + cancel context.CancelFunc + ctxDone <-chan struct{} // Cache the ctx.Done() chan. + userAgent string + md interface{} + conn net.Conn // underlying communication channel + loopy *loopyWriter + remoteAddr net.Addr + localAddr net.Addr + authInfo credentials.AuthInfo // auth info about the connection + + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + // goAway is closed to notify the upper layer (i.e., addrConn.transportMonitor) + // that the server sent GoAway on this transport. + goAway chan struct{} + // awakenKeepalive is used to wake up keepalive when after it has gone dormant. + awakenKeepalive chan struct{} + + framer *framer + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + // The scheme used: https if TLS is on, http otherwise. + scheme string + + isSecure bool + + creds []credentials.PerRPCCredentials + + // Boolean to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + kp keepalive.ClientParameters + + statsHandler stats.Handler + + initialWindowSize int32 + + bdpEst *bdpEstimator + // onSuccess is a callback that client transport calls upon + // receiving server preface to signal that a succefull HTTP2 + // connection was established. 
+ onSuccess func() + + maxConcurrentStreams uint32 + streamQuota int64 + streamsQuotaAvailable chan struct{} + waitingStreams uint32 + nextID uint32 + + mu sync.Mutex // guard the following variables + state transportState + activeStreams map[uint32]*Stream + // prevGoAway ID records the Last-Stream-ID in the previous GOAway frame. + prevGoAwayID uint32 + // goAwayReason records the http2.ErrCode and debug data received with the + // GoAway frame. + goAwayReason GoAwayReason + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // The number of streams that have ended successfully by receiving EoS bit set + // frame from server. + streamsSucceeded int64 + streamsFailed int64 + lastStreamCreated time.Time + msgSent int64 + msgRecv int64 + lastMsgSent time.Time + lastMsgRecv time.Time +} + +func dial(ctx context.Context, fn func(context.Context, string) (net.Conn, error), addr string) (net.Conn, error) { + if fn != nil { + return fn(ctx, addr) + } + return dialContext(ctx, "tcp", addr) +} + +func isTemporary(err error) bool { + switch err := err.(type) { + case interface { + Temporary() bool + }: + return err.Temporary() + case interface { + Timeout() bool + }: + // Timeouts may be resolved upon retry, and are thus treated as + // temporary. + return err.Timeout() + } + return true +} + +// newHTTP2Client constructs a connected ClientTransport to addr based on HTTP2 +// and starts to receive messages on it. Non-nil error returns if construction +// fails. +func newHTTP2Client(connectCtx, ctx context.Context, addr TargetInfo, opts ConnectOptions, onSuccess func()) (_ ClientTransport, err error) { + scheme := "http" + ctx, cancel := context.WithCancel(ctx) + defer func() { + if err != nil { + cancel() + } + }() + + conn, err := dial(connectCtx, opts.Dialer, addr.Addr) + if err != nil { + if opts.FailOnNonTempDialError { + return nil, connectionErrorf(isTemporary(err), err, "transport: error while dialing: %v", err) + } + return nil, connectionErrorf(true, err, "transport: Error while dialing %v", err) + } + // Any further errors will close the underlying connection + defer func(conn net.Conn) { + if err != nil { + conn.Close() + } + }(conn) + var ( + isSecure bool + authInfo credentials.AuthInfo + ) + if creds := opts.TransportCredentials; creds != nil { + scheme = "https" + conn, authInfo, err = creds.ClientHandshake(connectCtx, addr.Authority, conn) + if err != nil { + return nil, connectionErrorf(isTemporary(err), err, "transport: authentication handshake failed: %v", err) + } + isSecure = true + } + kp := opts.KeepaliveParams + // Validate keepalive parameters. + if kp.Time == 0 { + kp.Time = defaultClientKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultClientKeepaliveTimeout + } + dynamicWindow := true + icwz := int32(initialWindowSize) + if opts.InitialConnWindowSize >= defaultWindowSize { + icwz = opts.InitialConnWindowSize + dynamicWindow = false + } + writeBufSize := defaultWriteBufSize + if opts.WriteBufferSize > 0 { + writeBufSize = opts.WriteBufferSize + } + readBufSize := defaultReadBufSize + if opts.ReadBufferSize > 0 { + readBufSize = opts.ReadBufferSize + } + t := &http2Client{ + ctx: ctx, + ctxDone: ctx.Done(), // Cache Done chan. 
+ cancel: cancel, + userAgent: opts.UserAgent, + md: addr.Metadata, + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: authInfo, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + goAway: make(chan struct{}), + awakenKeepalive: make(chan struct{}, 1), + framer: newFramer(conn, writeBufSize, readBufSize), + fc: &trInFlow{limit: uint32(icwz)}, + scheme: scheme, + activeStreams: make(map[uint32]*Stream), + isSecure: isSecure, + creds: opts.PerRPCCredentials, + kp: kp, + statsHandler: opts.StatsHandler, + initialWindowSize: initialWindowSize, + onSuccess: onSuccess, + nextID: 1, + maxConcurrentStreams: defaultMaxStreamsClient, + streamQuota: defaultMaxStreamsClient, + streamsQuotaAvailable: make(chan struct{}, 1), + } + t.controlBuf = newControlBuffer(t.ctxDone) + if opts.InitialWindowSize >= defaultWindowSize { + t.initialWindowSize = opts.InitialWindowSize + dynamicWindow = false + } + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + // Make sure awakenKeepalive can't be written upon. + // keepalive routine will make it writable, if need be. + t.awakenKeepalive <- struct{}{} + if t.statsHandler != nil { + t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, opts.ChannelzParentID, "") + } + // Start the reader goroutine for incoming message. Each transport has + // a dedicated goroutine which reads HTTP2 frame from network. Then it + // dispatches the frame to the corresponding stream entity. + go t.reader() + // Send connection preface to server. + n, err := t.conn.Write(clientPreface) + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write client preface: %v", err) + } + if n != len(clientPreface) { + t.Close() + return nil, connectionErrorf(true, err, "transport: preface mismatch, wrote %d bytes; want %d", n, len(clientPreface)) + } + if t.initialWindowSize != defaultWindowSize { + err = t.framer.fr.WriteSettings(http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(t.initialWindowSize), + }) + } else { + err = t.framer.fr.WriteSettings() + } + if err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write initial settings frame: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := t.framer.fr.WriteWindowUpdate(0, delta); err != nil { + t.Close() + return nil, connectionErrorf(true, err, "transport: failed to write window update: %v", err) + } + } + t.framer.writer.Flush() + go func() { + t.loopy = newLoopyWriter(clientSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.run() + t.conn.Close() + close(t.writerDone) + }() + if t.kp.Time != infinity { + go t.keepalive() + } + return t, nil +} + +func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *Stream { + // TODO(zhaoq): Handle uint32 overflow of Stream.id. 
+ s := &Stream{ + done: make(chan struct{}), + method: callHdr.Method, + sendCompress: callHdr.SendCompress, + buf: newRecvBuffer(), + headerChan: make(chan struct{}), + contentSubtype: callHdr.ContentSubtype, + } + s.wq = newWriteQuota(defaultWriteQuota, s.done) + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + // The client side stream context should have exactly the same life cycle with the user provided context. + // That means, s.ctx should be read-only. And s.ctx is done iff ctx is done. + // So we use the original context here instead of creating a copy. + s.ctx = ctx + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctx.Done(), + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + return s +} + +func (t *http2Client) getPeer() *peer.Peer { + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + return pr +} + +func (t *http2Client) createHeaderFields(ctx context.Context, callHdr *CallHdr) ([]hpack.HeaderField, error) { + aud := t.createAudience(callHdr) + authData, err := t.getTrAuthData(ctx, aud) + if err != nil { + return nil, err + } + callAuthData, err := t.getCallAuthData(ctx, aud, callHdr) + if err != nil { + return nil, err + } + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + // Make the slice of certain predictable size to reduce allocations made by append. + hfLen := 7 // :method, :scheme, :path, :authority, content-type, user-agent, te + hfLen += len(authData) + len(callAuthData) + headerFields := make([]hpack.HeaderField, 0, hfLen) + headerFields = append(headerFields, hpack.HeaderField{Name: ":method", Value: "POST"}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":scheme", Value: t.scheme}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":path", Value: callHdr.Method}) + headerFields = append(headerFields, hpack.HeaderField{Name: ":authority", Value: callHdr.Host}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(callHdr.ContentSubtype)}) + headerFields = append(headerFields, hpack.HeaderField{Name: "user-agent", Value: t.userAgent}) + headerFields = append(headerFields, hpack.HeaderField{Name: "te", Value: "trailers"}) + + if callHdr.SendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: callHdr.SendCompress}) + } + if dl, ok := ctx.Deadline(); ok { + // Send out timeout regardless its value. The server can detect timeout context by itself. + // TODO(mmukhi): Perhaps this field should be updated when actually writing out to the wire. 
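// For reference, the fixed portion of the header list assembled above comes
// out as HPACK fields roughly like the following (the path, authority and
// user-agent values are made-up examples, and :scheme is "http" when the
// connection has no TLS credentials; only the field names are fixed):
//
//	hdrs := []hpack.HeaderField{
//		{Name: ":method", Value: "POST"},
//		{Name: ":scheme", Value: "https"},
//		{Name: ":path", Value: "/helloworld.Greeter/SayHello"},
//		{Name: ":authority", Value: "greeter.example.com"},
//		{Name: "content-type", Value: "application/grpc"},
//		{Name: "user-agent", Value: "grpc-go/1.x"},
//		{Name: "te", Value: "trailers"},
//	}
//
// grpc-encoding (when compressing), the grpc-timeout computed next, per-RPC
// auth metadata and any user metadata from the outgoing context are appended
// after this fixed prefix.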
+ timeout := dl.Sub(time.Now()) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-timeout", Value: encodeTimeout(timeout)}) + } + for k, v := range authData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + for k, v := range callAuthData { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + if b := stats.OutgoingTags(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-tags-bin", Value: encodeBinHeader(b)}) + } + if b := stats.OutgoingTrace(ctx); b != nil { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-trace-bin", Value: encodeBinHeader(b)}) + } + + if md, added, ok := metadata.FromOutgoingContextRaw(ctx); ok { + var k string + for _, vv := range added { + for i, v := range vv { + if i%2 == 0 { + k = v + continue + } + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + headerFields = append(headerFields, hpack.HeaderField{Name: strings.ToLower(k), Value: encodeMetadataHeader(k, v)}) + } + } + for k, vv := range md { + // HTTP doesn't allow you to set pseudoheaders after non pseudoheaders were set. + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + if md, ok := t.md.(*metadata.MD); ok { + for k, vv := range *md { + if isReservedHeader(k) { + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + } + return headerFields, nil +} + +func (t *http2Client) createAudience(callHdr *CallHdr) string { + // Create an audience string only if needed. + if len(t.creds) == 0 && callHdr.Creds == nil { + return "" + } + // Construct URI required to get auth request metadata. + // Omit port if it is the default one. + host := strings.TrimSuffix(callHdr.Host, ":443") + pos := strings.LastIndex(callHdr.Method, "/") + if pos == -1 { + pos = len(callHdr.Method) + } + return "https://" + host + callHdr.Method[:pos] +} + +func (t *http2Client) getTrAuthData(ctx context.Context, audience string) (map[string]string, error) { + authData := map[string]string{} + for _, c := range t.creds { + data, err := c.GetRequestMetadata(ctx, audience) + if err != nil { + if _, ok := status.FromError(err); ok { + return nil, err + } + + return nil, streamErrorf(codes.Unauthenticated, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2. + k = strings.ToLower(k) + authData[k] = v + } + } + return authData, nil +} + +func (t *http2Client) getCallAuthData(ctx context.Context, audience string, callHdr *CallHdr) (map[string]string, error) { + callAuthData := map[string]string{} + // Check if credentials.PerRPCCredentials were provided via call options. + // Note: if these credentials are provided both via dial options and call + // options, then both sets of credentials will be applied. 
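// Both callHdr.Creds here and the dial-option credentials consumed by
// getTrAuthData above implement credentials.PerRPCCredentials. A minimal
// illustrative implementation — the staticToken type below is an example,
// not part of gRPC — looks like:
//
//	type staticToken struct{ token string }
//
//	func (s staticToken) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
//		return map[string]string{"authorization": "Bearer " + s.token}, nil
//	}
//
//	func (s staticToken) RequireTransportSecurity() bool { return true }
//
// The returned keys are lower-cased and sent as HTTP/2 header fields on the
// RPC, which is what the code below does with the returned map.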
+ if callCreds := callHdr.Creds; callCreds != nil { + if !t.isSecure && callCreds.RequireTransportSecurity() { + return nil, streamErrorf(codes.Unauthenticated, "transport: cannot send secure credentials on an insecure connection") + } + data, err := callCreds.GetRequestMetadata(ctx, audience) + if err != nil { + return nil, streamErrorf(codes.Internal, "transport: %v", err) + } + for k, v := range data { + // Capital header names are illegal in HTTP/2 + k = strings.ToLower(k) + callAuthData[k] = v + } + } + return callAuthData, nil +} + +// NewStream creates a stream and registers it into the transport as "active" +// streams. +func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (_ *Stream, err error) { + ctx = peer.NewContext(ctx, t.getPeer()) + headerFields, err := t.createHeaderFields(ctx, callHdr) + if err != nil { + return nil, err + } + s := t.newStream(ctx, callHdr) + cleanup := func(err error) { + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // The stream was unprocessed by the server. + atomic.StoreUint32(&s.unprocessed, 1) + s.write(recvMsg{err: err}) + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.SwapUint32(&s.headerDone, 1) == 0 { + close(s.headerChan) + } + + } + hdr := &headerFrame{ + hf: headerFields, + endStream: false, + initStream: func(id uint32) (bool, error) { + t.mu.Lock() + if state := t.state; state != reachable { + t.mu.Unlock() + // Do a quick cleanup. + err := error(errStreamDrain) + if state == closing { + err = ErrConnClosing + } + cleanup(err) + return false, err + } + t.activeStreams[id] = s + if channelz.IsOn() { + t.czmu.Lock() + t.streamsStarted++ + t.lastStreamCreated = time.Now() + t.czmu.Unlock() + } + var sendPing bool + // If the number of active streams change from 0 to 1, then check if keepalive + // has gone dormant. If so, wake it up. + if len(t.activeStreams) == 1 { + select { + case t.awakenKeepalive <- struct{}{}: + sendPing = true + // Fill the awakenKeepalive channel again as this channel must be + // kept non-writable except at the point that the keepalive() + // goroutine is waiting either to be awaken or shutdown. + t.awakenKeepalive <- struct{}{} + default: + } + } + t.mu.Unlock() + return sendPing, nil + }, + onOrphaned: cleanup, + wq: s.wq, + } + firstTry := true + var ch chan struct{} + checkForStreamQuota := func(it interface{}) bool { + if t.streamQuota <= 0 { // Can go negative if server decreases it. 
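// No quota: the caller registers itself once as a waiter, parks on
// streamsQuotaAvailable in the loop further down, and retries when
// closeStream's addBackStreamQuota or handleSettings' updateStreamQuota
// signals that quota may be back. A simplified, mutex-based sketch of the
// same counter-plus-signal-channel idea (acquire/release are illustrative
// names; needs the standard context and sync packages) is:
//
//	var (
//		mu      sync.Mutex
//		n       int64                    // available quota, may go negative
//		waiting int                      // parked acquirers
//		avail   = make(chan struct{}, 1) // "quota may be back" signal
//	)
//
//	func acquire(ctx context.Context) error {
//		registered := false
//		for {
//			mu.Lock()
//			if n > 0 {
//				n--
//				if registered {
//					waiting--
//				}
//				if n > 0 && waiting > 0 { // pass the signal on
//					select {
//					case avail <- struct{}{}:
//					default:
//					}
//				}
//				mu.Unlock()
//				return nil
//			}
//			if !registered {
//				waiting++
//				registered = true
//			}
//			mu.Unlock()
//			select {
//			case <-avail:
//			case <-ctx.Done():
//				return ctx.Err()
//			}
//		}
//	}
//
//	func release() {
//		mu.Lock()
//		n++
//		if n > 0 && waiting > 0 {
//			select {
//			case avail <- struct{}{}:
//			default:
//			}
//		}
//		mu.Unlock()
//	}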
+ if firstTry { + t.waitingStreams++ + } + ch = t.streamsQuotaAvailable + return false + } + if !firstTry { + t.waitingStreams-- + } + t.streamQuota-- + h := it.(*headerFrame) + h.streamID = t.nextID + t.nextID += 2 + s.id = h.streamID + s.fc = &inFlow{limit: uint32(t.initialWindowSize)} + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + for { + success, err := t.controlBuf.executeAndPut(checkForStreamQuota, hdr) + if err != nil { + return nil, err + } + if success { + break + } + firstTry = false + select { + case <-ch: + case <-s.ctx.Done(): + return nil, ContextErr(s.ctx.Err()) + case <-t.goAway: + return nil, errStreamDrain + case <-t.ctx.Done(): + return nil, ErrConnClosing + } + } + if t.statsHandler != nil { + outHeader := &stats.OutHeader{ + Client: true, + FullMethod: callHdr.Method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: callHdr.SendCompress, + } + t.statsHandler.HandleRPC(s.ctx, outHeader) + } + return s, nil +} + +// CloseStream clears the footprint of a stream when the stream is not needed any more. +// This must not be executed in reader's goroutine. +func (t *http2Client) CloseStream(s *Stream, err error) { + var ( + rst bool + rstCode http2.ErrCode + ) + if err != nil { + rst = true + rstCode = http2.ErrCodeCancel + } + t.closeStream(s, err, rst, rstCode, nil, nil, false) +} + +func (t *http2Client) closeStream(s *Stream, err error, rst bool, rstCode http2.ErrCode, st *status.Status, mdata map[string][]string, eosReceived bool) { + // Set stream status to done. + if s.swapState(streamDone) == streamDone { + // If it was already done, return. + return + } + // status and trailers can be updated here without any synchronization because the stream goroutine will + // only read it after it sees an io.EOF error from read or write and we'll write those errors + // only after updating this. + s.status = st + if len(mdata) > 0 { + s.trailer = mdata + } + if err != nil { + // This will unblock reads eventually. + s.write(recvMsg{err: err}) + } + // This will unblock write. + close(s.done) + // If headerChan isn't closed, then close it. + if atomic.SwapUint32(&s.headerDone, 1) == 0 { + close(s.headerChan) + } + cleanup := &cleanupStream{ + streamID: s.id, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + } + t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + if eosReceived { + t.streamsSucceeded++ + } else { + t.streamsFailed++ + } + t.czmu.Unlock() + } + }, + rst: rst, + rstCode: rstCode, + } + addBackStreamQuota := func(interface{}) bool { + t.streamQuota++ + if t.streamQuota > 0 && t.waitingStreams > 0 { + select { + case t.streamsQuotaAvailable <- struct{}{}: + default: + } + } + return true + } + t.controlBuf.executeAndPut(addBackStreamQuota, cleanup) +} + +// Close kicks off the shutdown process of the transport. This should be called +// only once on a transport. Once it is called, the transport should not be +// accessed any more. +func (t *http2Client) Close() error { + t.mu.Lock() + // Make sure we only Close once. + if t.state == closing { + t.mu.Unlock() + return nil + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Notify all active streams. 
+ for _, s := range streams { + t.closeStream(s, ErrConnClosing, false, http2.ErrCodeNo, nil, nil, false) + } + if t.statsHandler != nil { + connEnd := &stats.ConnEnd{ + Client: true, + } + t.statsHandler.HandleConn(t.ctx, connEnd) + } + return err +} + +// GracefulClose sets the state to draining, which prevents new streams from +// being created and causes the transport to be closed when the last active +// stream is closed. If there are no active streams, the transport is closed +// immediately. This does nothing if the transport is already draining or +// closing. +func (t *http2Client) GracefulClose() error { + t.mu.Lock() + // Make sure we move to draining only from active. + if t.state == draining || t.state == closing { + t.mu.Unlock() + return nil + } + t.state = draining + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + return t.Close() + } + return nil +} + +// Write formats the data into HTTP2 data frame(s) and sends it out. The caller +// should proceed only if Write returns nil. +func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if opts.Last { + // If it's the last message, update stream state. + if !s.compareAndSwapState(streamActive, streamWriteDone) { + return errStreamDone + } + } else if s.getState() != streamActive { + return errStreamDone + } + df := &dataFrame{ + streamID: s.id, + endStream: opts.Last, + } + if hdr != nil || data != nil { // If it's not an empty data frame. + // Add some data to grpc message header so that we can equally + // distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df.h, df.d = hdr, data + // TODO(mmukhi): The above logic in this if can be moved to loopyWriter's data handler. + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + return err + } + } + return t.controlBuf.put(df) +} + +func (t *http2Client) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + s, ok := t.activeStreams[f.Header().StreamID] + return s, ok +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Client) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateWindow adjusts the inbound quota for the stream. +// Window updates will be sent out when the cumulative quota +// exceeds the corresponding threshold. +func (t *http2Client) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. 
+func (t *http2Client) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.mu.Unlock() + updateIWS := func(interface{}) bool { + t.initialWindowSize = int32(n) + return true + } + t.controlBuf.executeAndPut(updateIWS, &outgoingWindowUpdate{streamID: 0, increment: t.fc.newLimit(n)}) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) +} + +func (t *http2Client) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + // + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + } + // The server has closed the stream without sending trailers. Record that + // the read direction is closed, and set the status appropriately. + if f.FrameHeader.Flags.Has(http2.FlagDataEndStream) { + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.New(codes.Internal, "server closed the stream without sending trailers"), nil, true) + } +} + +func (t *http2Client) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + if f.ErrCode == http2.ErrCodeRefusedStream { + // The stream was unprocessed by the server. 
+ atomic.StoreUint32(&s.unprocessed, 1) + } + statusCode, ok := http2ErrConvTab[f.ErrCode] + if !ok { + warningf("transport: http2Client.handleRSTStream found no mapped gRPC status for the received http2 error %v", f.ErrCode) + statusCode = codes.Unknown + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, status.Newf(statusCode, "stream terminated by RST_STREAM with error code: %v", f.ErrCode), nil, false) +} + +func (t *http2Client) handleSettings(f *http2.SettingsFrame, isFirst bool) { + if f.IsAck() { + return + } + var maxStreams *uint32 + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + if s.ID == http2.SettingMaxConcurrentStreams { + maxStreams = new(uint32) + *maxStreams = s.Val + return nil + } + ss = append(ss, s) + return nil + }) + if isFirst && maxStreams == nil { + maxStreams = new(uint32) + *maxStreams = math.MaxUint32 + } + sf := &incomingSettings{ + ss: ss, + } + if maxStreams == nil { + t.controlBuf.put(sf) + return + } + updateStreamQuota := func(interface{}) bool { + delta := int64(*maxStreams) - int64(t.maxConcurrentStreams) + t.maxConcurrentStreams = *maxStreams + t.streamQuota += delta + if delta > 0 && t.waitingStreams > 0 { + close(t.streamsQuotaAvailable) // wake all of them up. + t.streamsQuotaAvailable = make(chan struct{}, 1) + } + return true + } + t.controlBuf.executeAndPut(updateStreamQuota, sf) +} + +func (t *http2Client) handlePing(f *http2.PingFrame) { + if f.IsAck() { + // Maybe it's a BDP ping. + if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) +} + +func (t *http2Client) handleGoAway(f *http2.GoAwayFrame) { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return + } + if f.ErrCode == http2.ErrCodeEnhanceYourCalm { + infof("Client received GoAway with http2.ErrCodeEnhanceYourCalm.") + } + id := f.LastStreamID + if id > 0 && id%2 != 1 { + t.mu.Unlock() + t.Close() + return + } + // A client can receive multiple GoAways from the server (see + // https://github.com/grpc/grpc-go/issues/1387). The idea is that the first + // GoAway will be sent with an ID of MaxInt32 and the second GoAway will be + // sent after an RTT delay with the ID of the last stream the server will + // process. + // + // Therefore, when we get the first GoAway we don't necessarily close any + // streams. While in case of second GoAway we close all streams created after + // the GoAwayId. This way streams that were in-flight while the GoAway from + // server was being sent don't get killed. + select { + case <-t.goAway: // t.goAway has been closed (i.e.,multiple GoAways). + // If there are multiple GoAways the first one should always have an ID greater than the following ones. + if id > t.prevGoAwayID { + t.mu.Unlock() + t.Close() + return + } + default: + t.setGoAwayReason(f) + close(t.goAway) + t.state = draining + t.controlBuf.put(&incomingGoAway{}) + } + // All streams with IDs greater than the GoAwayId + // and smaller than the previous GoAway ID should be killed. + upperLimit := t.prevGoAwayID + if upperLimit == 0 { // This is the first GoAway Frame. + upperLimit = math.MaxUint32 // Kill all streams after the GoAway ID. + } + for streamID, stream := range t.activeStreams { + if streamID > id && streamID <= upperLimit { + // The stream was unprocessed by the server. 
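// Marking the stream unprocessed records that the server never started work
// on this RPC, which lets higher layers treat it as safely retryable. As a
// worked example of the window being closed here: the server's first,
// graceful GOAWAY usually carries LastStreamID = 2^31-1; prevGoAwayID is
// still 0 at that point, so upperLimit becomes MaxUint32 and effectively
// nothing is torn down. When the follow-up GOAWAY arrives with, say,
// LastStreamID = 7, upperLimit is the previous 2^31-1, so active streams
// 9, 11, 13, ... are closed as unprocessed while streams 1-7, already
// accepted by the server, are left to finish. Condensed into a predicate
// (illustrative only):
//
//	func shouldClose(streamID, lastID, prevUpper uint32) bool {
//		if prevUpper == 0 { // no GOAWAY seen before on this connection
//			prevUpper = math.MaxUint32
//		}
//		return streamID > lastID && streamID <= prevUpper
//	}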
+ atomic.StoreUint32(&stream.unprocessed, 1) + t.closeStream(stream, errStreamDrain, false, http2.ErrCodeNo, statusGoAway, nil, false) + } + } + t.prevGoAwayID = id + active := len(t.activeStreams) + t.mu.Unlock() + if active == 0 { + t.Close() + } +} + +// setGoAwayReason sets the value of t.goAwayReason based +// on the GoAway frame received. +// It expects a lock on transport's mutext to be held by +// the caller. +func (t *http2Client) setGoAwayReason(f *http2.GoAwayFrame) { + t.goAwayReason = GoAwayNoReason + switch f.ErrCode { + case http2.ErrCodeEnhanceYourCalm: + if string(f.DebugData()) == "too_many_pings" { + t.goAwayReason = GoAwayTooManyPings + } + } +} + +func (t *http2Client) GetGoAwayReason() GoAwayReason { + t.mu.Lock() + defer t.mu.Unlock() + return t.goAwayReason +} + +func (t *http2Client) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +// operateHeaders takes action on the decoded headers. +func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { + s, ok := t.getStream(frame) + if !ok { + return + } + atomic.StoreUint32(&s.bytesReceived, 1) + var state decodeState + if err := state.decodeResponseHeader(frame); err != nil { + t.closeStream(s, err, true, http2.ErrCodeProtocol, nil, nil, false) + // Something wrong. Stops reading even when there is remaining. + return + } + + endStream := frame.StreamEnded() + var isHeader bool + defer func() { + if t.statsHandler != nil { + if isHeader { + inHeader := &stats.InHeader{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inHeader) + } else { + inTrailer := &stats.InTrailer{ + Client: true, + WireLength: int(frame.Header().Length), + } + t.statsHandler.HandleRPC(s.ctx, inTrailer) + } + } + }() + // If headers haven't been received yet. + if atomic.SwapUint32(&s.headerDone, 1) == 0 { + if !endStream { + // Headers frame is not actually a trailers-only frame. + isHeader = true + // These values can be set without any synchronization because + // stream goroutine will read it only after seeing a closed + // headerChan which we'll close after setting this. + s.recvCompress = state.encoding + if len(state.mdata) > 0 { + s.header = state.mdata + } + } + close(s.headerChan) + } + if !endStream { + return + } + t.closeStream(s, io.EOF, false, http2.ErrCodeNo, state.status(), state.mdata, true) +} + +// reader runs as a separate goroutine in charge of reading data from network +// connection. +// +// TODO(zhaoq): currently one reader per transport. Investigate whether this is +// optimal. +// TODO(zhaoq): Check the validity of the incoming frame sequence. +func (t *http2Client) reader() { + defer close(t.readerDone) + // Check the validity of server preface. + frame, err := t.framer.fr.ReadFrame() + if err != nil { + t.Close() + return + } + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + t.Close() + return + } + t.onSuccess() + t.handleSettings(sf, true) + + // loop to keep reading incoming messages on this transport. + for { + frame, err := t.framer.fr.ReadFrame() + atomic.CompareAndSwapUint32(&t.activity, 0, 1) + if err != nil { + // Abort an active stream if the http2.Framer returns a + // http2.StreamError. This can happen only if the server's response + // is malformed http2. 
+ if se, ok := err.(http2.StreamError); ok { + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + // use error detail to provide better err message + t.closeStream(s, streamErrorf(http2ErrConvTab[se.Code], "%v", t.framer.fr.ErrorDetail()), true, http2.ErrCodeProtocol, nil, nil, false) + } + continue + } else { + // Transport error. + t.Close() + return + } + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + t.operateHeaders(frame) + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame, false) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.GoAwayFrame: + t.handleGoAway(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + default: + errorf("transport: http2Client.reader got unhandled frame type %v.", frame) + } + } +} + +// keepalive running in a separate goroutune makes sure the connection is alive by sending pings. +func (t *http2Client) keepalive() { + p := &ping{data: [8]byte{}} + timer := time.NewTimer(t.kp.Time) + for { + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + // Check if keepalive should go dormant. + t.mu.Lock() + if len(t.activeStreams) < 1 && !t.kp.PermitWithoutStream { + // Make awakenKeepalive writable. + <-t.awakenKeepalive + t.mu.Unlock() + select { + case <-t.awakenKeepalive: + // If the control gets here a ping has been sent + // need to reset the timer with keepalive.Timeout. + case <-t.ctx.Done(): + return + } + } else { + t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + t.kpCount++ + t.czmu.Unlock() + } + // Send ping. + t.controlBuf.put(p) + } + + // By the time control gets here a ping has been sent one way or the other. 
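// The loop then re-arms the timer with the keepalive timeout and waits for
// proof of life before giving up on the connection. Stripped of the
// dormancy handling above, the activity-flag pattern used here (identifiers
// are illustrative; needs sync/atomic, time and context) is:
//
//	var activity uint32 // the reader sets this to 1 on any inbound frame
//
//	func keepaliveLoop(ctx context.Context, interval, timeout time.Duration, ping func()) {
//		timer := time.NewTimer(interval)
//		defer timer.Stop()
//		for {
//			select {
//			case <-timer.C:
//				if atomic.CompareAndSwapUint32(&activity, 1, 0) {
//					timer.Reset(interval) // traffic seen; no ping needed
//					continue
//				}
//				ping()
//				timer.Reset(timeout)
//				select {
//				case <-timer.C:
//					if atomic.CompareAndSwapUint32(&activity, 1, 0) {
//						timer.Reset(interval)
//						continue
//					}
//					return // no reply in time: caller closes the transport
//				case <-ctx.Done():
//					return
//				}
//			case <-ctx.Done():
//				return
//			}
//		}
//	}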
+ timer.Reset(t.kp.Timeout) + select { + case <-timer.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + timer.Reset(t.kp.Time) + continue + } + t.Close() + return + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + case <-t.ctx.Done(): + if !timer.Stop() { + <-timer.C + } + return + } + } +} + +func (t *http2Client) Error() <-chan struct{} { + return t.ctx.Done() +} + +func (t *http2Client) GoAway() <-chan struct{} { + return t.goAway +} + +func (t *http2Client) ChannelzMetric() *channelz.SocketInternalMetric { + t.czmu.RLock() + s := channelz.SocketInternalMetric{ + StreamsStarted: t.streamsStarted, + StreamsSucceeded: t.streamsSucceeded, + StreamsFailed: t.streamsFailed, + MessagesSent: t.msgSent, + MessagesReceived: t.msgRecv, + KeepAlivesSent: t.kpCount, + LastLocalStreamCreatedTimestamp: t.lastStreamCreated, + LastMessageSentTimestamp: t.lastMsgSent, + LastMessageReceivedTimestamp: t.lastMsgRecv, + LocalFlowControlWindow: int64(t.fc.getSize()), + //socket options + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // Security + // RemoteName : + } + t.czmu.RUnlock() + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Client) IncrMsgSent() { + t.czmu.Lock() + t.msgSent++ + t.lastMsgSent = time.Now() + t.czmu.Unlock() +} + +func (t *http2Client) IncrMsgRecv() { + t.czmu.Lock() + t.msgRecv++ + t.lastMsgRecv = time.Now() + t.czmu.Unlock() +} + +func (t *http2Client) getOutFlowWindow() int64 { + resp := make(chan uint32, 1) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} diff --git a/vendor/google.golang.org/grpc/transport/http2_server.go b/vendor/google.golang.org/grpc/transport/http2_server.go new file mode 100644 index 00000000000..ab356189050 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http2_server.go @@ -0,0 +1,1137 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bytes" + "errors" + "fmt" + "io" + "math" + "math/rand" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + + "google.golang.org/grpc/channelz" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +// ErrIllegalHeaderWrite indicates that setting header is illegal because of +// the stream's state. +var ErrIllegalHeaderWrite = errors.New("transport: the stream is done or WriteHeader was already called") + +// http2Server implements the ServerTransport interface with HTTP2. 
+type http2Server struct { + ctx context.Context + ctxDone <-chan struct{} // Cache the context.Done() chan + cancel context.CancelFunc + conn net.Conn + loopy *loopyWriter + readerDone chan struct{} // sync point to enable testing. + writerDone chan struct{} // sync point to enable testing. + remoteAddr net.Addr + localAddr net.Addr + maxStreamID uint32 // max stream ID ever seen + authInfo credentials.AuthInfo // auth info about the connection + inTapHandle tap.ServerInHandle + framer *framer + // The max number of concurrent streams. + maxStreams uint32 + // controlBuf delivers all the control related tasks (e.g., window + // updates, reset streams, and various settings) to the controller. + controlBuf *controlBuffer + fc *trInFlow + stats stats.Handler + // Flag to keep track of reading activity on transport. + // 1 is true and 0 is false. + activity uint32 // Accessed atomically. + // Keepalive and max-age parameters for the server. + kp keepalive.ServerParameters + + // Keepalive enforcement policy. + kep keepalive.EnforcementPolicy + // The time instance last ping was received. + lastPingAt time.Time + // Number of times the client has violated keepalive ping policy so far. + pingStrikes uint8 + // Flag to signify that number of ping strikes should be reset to 0. + // This is set whenever data or header frames are sent. + // 1 means yes. + resetPingStrikes uint32 // Accessed atomically. + initialWindowSize int32 + bdpEst *bdpEstimator + + mu sync.Mutex // guard the following + + // drainChan is initialized when drain(...) is called the first time. + // After which the server writes out the first GoAway(with ID 2^31-1) frame. + // Then an independent goroutine will be launched to later send the second GoAway. + // During this time we don't want to write another first GoAway(with ID 2^31 -1) frame. + // Thus call to drain(...) will be a no-op if drainChan is already initialized since draining is + // already underway. + drainChan chan struct{} + state transportState + activeStreams map[uint32]*Stream + // idle is the time instant when the connection went idle. + // This is either the beginning of the connection or when the number of + // RPCs go down to 0. + // When the connection is busy, this value is set to 0. + idle time.Time + + // Fields below are for channelz metric collection. + channelzID int64 // channelz unique identification number + czmu sync.RWMutex + kpCount int64 + // The number of streams that have started, including already finished ones. + streamsStarted int64 + // The number of streams that have ended successfully by sending frame with + // EoS bit set. + streamsSucceeded int64 + streamsFailed int64 + lastStreamCreated time.Time + msgSent int64 + msgRecv int64 + lastMsgSent time.Time + lastMsgRecv time.Time +} + +// newHTTP2Server constructs a ServerTransport based on HTTP2. ConnectionError is +// returned if something goes wrong. +func newHTTP2Server(conn net.Conn, config *ServerConfig) (_ ServerTransport, err error) { + writeBufSize := defaultWriteBufSize + if config.WriteBufferSize > 0 { + writeBufSize = config.WriteBufferSize + } + readBufSize := defaultReadBufSize + if config.ReadBufferSize > 0 { + readBufSize = config.ReadBufferSize + } + framer := newFramer(conn, writeBufSize, readBufSize) + // Send initial settings as connection preface to client. + var isettings []http2.Setting + // TODO(zhaoq): Have a better way to signal "no limit" because 0 is + // permitted in the HTTP2 spec. 
+ maxStreams := config.MaxStreams + if maxStreams == 0 { + maxStreams = math.MaxUint32 + } else { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingMaxConcurrentStreams, + Val: maxStreams, + }) + } + dynamicWindow := true + iwz := int32(initialWindowSize) + if config.InitialWindowSize >= defaultWindowSize { + iwz = config.InitialWindowSize + dynamicWindow = false + } + icwz := int32(initialWindowSize) + if config.InitialConnWindowSize >= defaultWindowSize { + icwz = config.InitialConnWindowSize + dynamicWindow = false + } + if iwz != defaultWindowSize { + isettings = append(isettings, http2.Setting{ + ID: http2.SettingInitialWindowSize, + Val: uint32(iwz)}) + } + if err := framer.fr.WriteSettings(isettings...); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + // Adjust the connection flow control window if needed. + if delta := uint32(icwz - defaultWindowSize); delta > 0 { + if err := framer.fr.WriteWindowUpdate(0, delta); err != nil { + return nil, connectionErrorf(false, err, "transport: %v", err) + } + } + kp := config.KeepaliveParams + if kp.MaxConnectionIdle == 0 { + kp.MaxConnectionIdle = defaultMaxConnectionIdle + } + if kp.MaxConnectionAge == 0 { + kp.MaxConnectionAge = defaultMaxConnectionAge + } + // Add a jitter to MaxConnectionAge. + kp.MaxConnectionAge += getJitter(kp.MaxConnectionAge) + if kp.MaxConnectionAgeGrace == 0 { + kp.MaxConnectionAgeGrace = defaultMaxConnectionAgeGrace + } + if kp.Time == 0 { + kp.Time = defaultServerKeepaliveTime + } + if kp.Timeout == 0 { + kp.Timeout = defaultServerKeepaliveTimeout + } + kep := config.KeepalivePolicy + if kep.MinTime == 0 { + kep.MinTime = defaultKeepalivePolicyMinTime + } + ctx, cancel := context.WithCancel(context.Background()) + t := &http2Server{ + ctx: ctx, + cancel: cancel, + ctxDone: ctx.Done(), + conn: conn, + remoteAddr: conn.RemoteAddr(), + localAddr: conn.LocalAddr(), + authInfo: config.AuthInfo, + framer: framer, + readerDone: make(chan struct{}), + writerDone: make(chan struct{}), + maxStreams: maxStreams, + inTapHandle: config.InTapHandle, + fc: &trInFlow{limit: uint32(icwz)}, + state: reachable, + activeStreams: make(map[uint32]*Stream), + stats: config.StatsHandler, + kp: kp, + idle: time.Now(), + kep: kep, + initialWindowSize: iwz, + } + t.controlBuf = newControlBuffer(t.ctxDone) + if dynamicWindow { + t.bdpEst = &bdpEstimator{ + bdp: initialWindowSize, + updateFlowControl: t.updateFlowControl, + } + } + if t.stats != nil { + t.ctx = t.stats.TagConn(t.ctx, &stats.ConnTagInfo{ + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + }) + connBegin := &stats.ConnBegin{} + t.stats.HandleConn(t.ctx, connBegin) + } + if channelz.IsOn() { + t.channelzID = channelz.RegisterNormalSocket(t, config.ChannelzParentID, "") + } + t.framer.writer.Flush() + + defer func() { + if err != nil { + t.Close() + } + }() + + // Check the validity of client preface. 
+ preface := make([]byte, len(clientPreface)) + if _, err := io.ReadFull(t.conn, preface); err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to receive the preface from client: %v", err) + } + if !bytes.Equal(preface, clientPreface) { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams received bogus greeting from client: %q", preface) + } + + frame, err := t.framer.fr.ReadFrame() + if err == io.EOF || err == io.ErrUnexpectedEOF { + return nil, err + } + if err != nil { + return nil, connectionErrorf(false, err, "transport: http2Server.HandleStreams failed to read initial settings frame: %v", err) + } + atomic.StoreUint32(&t.activity, 1) + sf, ok := frame.(*http2.SettingsFrame) + if !ok { + return nil, connectionErrorf(false, nil, "transport: http2Server.HandleStreams saw invalid preface type %T from client", frame) + } + t.handleSettings(sf) + + go func() { + t.loopy = newLoopyWriter(serverSide, t.framer, t.controlBuf, t.bdpEst) + t.loopy.ssGoAwayHandler = t.outgoingGoAwayHandler + t.loopy.run() + t.conn.Close() + close(t.writerDone) + }() + go t.keepalive() + return t, nil +} + +// operateHeader takes action on the decoded headers. +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) (close bool) { + streamID := frame.Header().StreamID + var state decodeState + for _, hf := range frame.Fields { + if err := state.processHeaderField(hf); err != nil { + if se, ok := err.(StreamError); ok { + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: statusCodeConvTab[se.Code], + onWrite: func() {}, + }) + } + return + } + } + + buf := newRecvBuffer() + s := &Stream{ + id: streamID, + st: t, + buf: buf, + fc: &inFlow{limit: uint32(t.initialWindowSize)}, + recvCompress: state.encoding, + method: state.method, + contentSubtype: state.contentSubtype, + } + if frame.StreamEnded() { + // s is just created by the caller. No lock needed. + s.state = streamReadDone + } + if state.timeoutSet { + s.ctx, s.cancel = context.WithTimeout(t.ctx, state.timeout) + } else { + s.ctx, s.cancel = context.WithCancel(t.ctx) + } + pr := &peer.Peer{ + Addr: t.remoteAddr, + } + // Attach Auth info if there is any. + if t.authInfo != nil { + pr.AuthInfo = t.authInfo + } + s.ctx = peer.NewContext(s.ctx, pr) + // Attach the received metadata to the context. + if len(state.mdata) > 0 { + s.ctx = metadata.NewIncomingContext(s.ctx, state.mdata) + } + if state.statsTags != nil { + s.ctx = stats.SetIncomingTags(s.ctx, state.statsTags) + } + if state.statsTrace != nil { + s.ctx = stats.SetIncomingTrace(s.ctx, state.statsTrace) + } + if t.inTapHandle != nil { + var err error + info := &tap.Info{ + FullMethodName: state.method, + } + s.ctx, err = t.inTapHandle(s.ctx, info) + if err != nil { + warningf("transport: http2Server.operateHeaders got an error from InTapHandle: %v", err) + t.controlBuf.put(&cleanupStream{ + streamID: s.id, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + return + } + } + t.mu.Lock() + if t.state != reachable { + t.mu.Unlock() + return + } + if uint32(len(t.activeStreams)) >= t.maxStreams { + t.mu.Unlock() + t.controlBuf.put(&cleanupStream{ + streamID: streamID, + rst: true, + rstCode: http2.ErrCodeRefusedStream, + onWrite: func() {}, + }) + return + } + if streamID%2 != 1 || streamID <= t.maxStreamID { + t.mu.Unlock() + // illegal gRPC stream id. 
+ errorf("transport: http2Server.HandleStreams received an illegal stream id: %v", streamID) + return true + } + t.maxStreamID = streamID + t.activeStreams[streamID] = s + if len(t.activeStreams) == 1 { + t.idle = time.Time{} + } + t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + t.streamsStarted++ + t.lastStreamCreated = time.Now() + t.czmu.Unlock() + } + s.requestRead = func(n int) { + t.adjustWindow(s, uint32(n)) + } + s.ctx = traceCtx(s.ctx, s.method) + if t.stats != nil { + s.ctx = t.stats.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) + inHeader := &stats.InHeader{ + FullMethod: s.method, + RemoteAddr: t.remoteAddr, + LocalAddr: t.localAddr, + Compression: s.recvCompress, + WireLength: int(frame.Header().Length), + } + t.stats.HandleRPC(s.ctx, inHeader) + } + s.ctxDone = s.ctx.Done() + s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone) + s.trReader = &transportReader{ + reader: &recvBufferReader{ + ctx: s.ctx, + ctxDone: s.ctxDone, + recv: s.buf, + }, + windowHandler: func(n int) { + t.updateWindow(s, uint32(n)) + }, + } + handle(s) + return +} + +// HandleStreams receives incoming streams using the given handler. This is +// typically run in a separate goroutine. +// traceCtx attaches trace to ctx and returns the new context. +func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { + defer close(t.readerDone) + for { + frame, err := t.framer.fr.ReadFrame() + atomic.StoreUint32(&t.activity, 1) + if err != nil { + if se, ok := err.(http2.StreamError); ok { + warningf("transport: http2Server.HandleStreams encountered http2.StreamError: %v", se) + t.mu.Lock() + s := t.activeStreams[se.StreamID] + t.mu.Unlock() + if s != nil { + t.closeStream(s, true, se.Code, nil, false) + } else { + t.controlBuf.put(&cleanupStream{ + streamID: se.StreamID, + rst: true, + rstCode: se.Code, + onWrite: func() {}, + }) + } + continue + } + if err == io.EOF || err == io.ErrUnexpectedEOF { + t.Close() + return + } + warningf("transport: http2Server.HandleStreams failed to read frame: %v", err) + t.Close() + return + } + switch frame := frame.(type) { + case *http2.MetaHeadersFrame: + if t.operateHeaders(frame, handle, traceCtx) { + t.Close() + break + } + case *http2.DataFrame: + t.handleData(frame) + case *http2.RSTStreamFrame: + t.handleRSTStream(frame) + case *http2.SettingsFrame: + t.handleSettings(frame) + case *http2.PingFrame: + t.handlePing(frame) + case *http2.WindowUpdateFrame: + t.handleWindowUpdate(frame) + case *http2.GoAwayFrame: + // TODO: Handle GoAway from the client appropriately. + default: + errorf("transport: http2Server.HandleStreams found unhandled frame type %v.", frame) + } + } +} + +func (t *http2Server) getStream(f http2.Frame) (*Stream, bool) { + t.mu.Lock() + defer t.mu.Unlock() + if t.activeStreams == nil { + // The transport is closing. + return nil, false + } + s, ok := t.activeStreams[f.Header().StreamID] + if !ok { + // The stream is already done. + return nil, false + } + return s, true +} + +// adjustWindow sends out extra window update over the initial window size +// of stream if the application is requesting data larger in size than +// the window. +func (t *http2Server) adjustWindow(s *Stream, n uint32) { + if w := s.fc.maybeAdjust(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, increment: w}) + } + +} + +// updateWindow adjusts the inbound quota for the stream and the transport. 
+// Window updates will deliver to the controller for sending when +// the cumulative quota exceeds the corresponding threshold. +func (t *http2Server) updateWindow(s *Stream, n uint32) { + if w := s.fc.onRead(n); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{streamID: s.id, + increment: w, + }) + } +} + +// updateFlowControl updates the incoming flow control windows +// for the transport and the stream based on the current bdp +// estimation. +func (t *http2Server) updateFlowControl(n uint32) { + t.mu.Lock() + for _, s := range t.activeStreams { + s.fc.newLimit(n) + } + t.initialWindowSize = int32(n) + t.mu.Unlock() + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: t.fc.newLimit(n), + }) + t.controlBuf.put(&outgoingSettings{ + ss: []http2.Setting{ + { + ID: http2.SettingInitialWindowSize, + Val: n, + }, + }, + }) + +} + +func (t *http2Server) handleData(f *http2.DataFrame) { + size := f.Header().Length + var sendBDPPing bool + if t.bdpEst != nil { + sendBDPPing = t.bdpEst.add(size) + } + // Decouple connection's flow control from application's read. + // An update on connection's flow control should not depend on + // whether user application has read the data or not. Such a + // restriction is already imposed on the stream's flow control, + // and therefore the sender will be blocked anyways. + // Decoupling the connection flow control will prevent other + // active(fast) streams from starving in presence of slow or + // inactive streams. + if w := t.fc.onData(size); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + if sendBDPPing { + // Avoid excessive ping detection (e.g. in an L7 proxy) + // by sending a window update prior to the BDP ping. + if w := t.fc.reset(); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{ + streamID: 0, + increment: w, + }) + } + t.controlBuf.put(bdpPing) + } + // Select the right stream to dispatch. + s, ok := t.getStream(f) + if !ok { + return + } + if size > 0 { + if err := s.fc.onData(size); err != nil { + t.closeStream(s, true, http2.ErrCodeFlowControl, nil, false) + return + } + if f.Header().Flags.Has(http2.FlagDataPadded) { + if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 { + t.controlBuf.put(&outgoingWindowUpdate{s.id, w}) + } + } + // TODO(bradfitz, zhaoq): A copy is required here because there is no + // guarantee f.Data() is consumed before the arrival of next frame. + // Can this copy be eliminated? + if len(f.Data()) > 0 { + data := make([]byte, len(f.Data())) + copy(data, f.Data()) + s.write(recvMsg{data: data}) + } + } + if f.Header().Flags.Has(http2.FlagDataEndStream) { + // Received the end of stream from the client. + s.compareAndSwapState(streamActive, streamReadDone) + s.write(recvMsg{err: io.EOF}) + } +} + +func (t *http2Server) handleRSTStream(f *http2.RSTStreamFrame) { + s, ok := t.getStream(f) + if !ok { + return + } + t.closeStream(s, false, 0, nil, false) +} + +func (t *http2Server) handleSettings(f *http2.SettingsFrame) { + if f.IsAck() { + return + } + var ss []http2.Setting + f.ForeachSetting(func(s http2.Setting) error { + ss = append(ss, s) + return nil + }) + t.controlBuf.put(&incomingSettings{ + ss: ss, + }) +} + +const ( + maxPingStrikes = 2 + defaultPingTimeout = 2 * time.Hour +) + +func (t *http2Server) handlePing(f *http2.PingFrame) { + if f.IsAck() { + if f.Data == goAwayPing.data && t.drainChan != nil { + close(t.drainChan) + return + } + // Maybe it's a BDP ping. 
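// For pings that are not ACKs, the remainder of this function also enforces
// the server's keepalive policy: a ping arriving sooner than the policy
// allows counts as a strike, sending data or header frames resets the strike
// counter, and more than maxPingStrikes strikes triggers a
// GOAWAY(ENHANCE_YOUR_CALM, "too_many_pings"). Condensed into a predicate
// (illustrative only; policy is a keepalive.EnforcementPolicy):
//
//	func pingTooSoon(now, lastPing time.Time, activeStreams int, policy keepalive.EnforcementPolicy) bool {
//		minInterval := policy.MinTime
//		if activeStreams < 1 && !policy.PermitWithoutStream {
//			minInterval = defaultPingTimeout // keepalive should be idle
//		}
//		return now.Sub(lastPing) < minInterval
//	}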
+ if t.bdpEst != nil { + t.bdpEst.calculate(f.Data) + } + return + } + pingAck := &ping{ack: true} + copy(pingAck.data[:], f.Data[:]) + t.controlBuf.put(pingAck) + + now := time.Now() + defer func() { + t.lastPingAt = now + }() + // A reset ping strikes means that we don't need to check for policy + // violation for this ping and the pingStrikes counter should be set + // to 0. + if atomic.CompareAndSwapUint32(&t.resetPingStrikes, 1, 0) { + t.pingStrikes = 0 + return + } + t.mu.Lock() + ns := len(t.activeStreams) + t.mu.Unlock() + if ns < 1 && !t.kep.PermitWithoutStream { + // Keepalive shouldn't be active thus, this new ping should + // have come after at least defaultPingTimeout. + if t.lastPingAt.Add(defaultPingTimeout).After(now) { + t.pingStrikes++ + } + } else { + // Check if keepalive policy is respected. + if t.lastPingAt.Add(t.kep.MinTime).After(now) { + t.pingStrikes++ + } + } + + if t.pingStrikes > maxPingStrikes { + // Send goaway and close the connection. + errorf("transport: Got too many pings from the client, closing the connection.") + t.controlBuf.put(&goAway{code: http2.ErrCodeEnhanceYourCalm, debugData: []byte("too_many_pings"), closeConn: true}) + } +} + +func (t *http2Server) handleWindowUpdate(f *http2.WindowUpdateFrame) { + t.controlBuf.put(&incomingWindowUpdate{ + streamID: f.Header().StreamID, + increment: f.Increment, + }) +} + +func appendHeaderFieldsFromMD(headerFields []hpack.HeaderField, md metadata.MD) []hpack.HeaderField { + for k, vv := range md { + if isReservedHeader(k) { + // Clients don't tolerate reading restricted headers after some non restricted ones were sent. + continue + } + for _, v := range vv { + headerFields = append(headerFields, hpack.HeaderField{Name: k, Value: encodeMetadataHeader(k, v)}) + } + } + return headerFields +} + +// WriteHeader sends the header metedata md back to the client. +func (t *http2Server) WriteHeader(s *Stream, md metadata.MD) error { + if s.updateHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + if md.Len() > 0 { + if s.header.Len() > 0 { + s.header = metadata.Join(s.header, md) + } else { + s.header = md + } + } + t.writeHeaderLocked(s) + s.hdrMu.Unlock() + return nil +} + +func (t *http2Server) writeHeaderLocked(s *Stream) { + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // at least :status, content-type will be there if none else. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + if s.sendCompress != "" { + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-encoding", Value: s.sendCompress}) + } + headerFields = appendHeaderFieldsFromMD(headerFields, s.header) + t.controlBuf.put(&headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: false, + onWrite: func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + }, + wq: s.wq, + }) + if t.stats != nil { + // Note: WireLength is not set in outHeader. + // TODO(mmukhi): Revisit this later, if needed. + outHeader := &stats.OutHeader{} + t.stats.HandleRPC(s.Context(), outHeader) + } +} + +// WriteStatus sends stream status to the client and terminates the stream. +// There is no further I/O operations being able to perform on this stream. 
+// TODO(zhaoq): Now it indicates the end of entire stream. Revisit if early +// OK is adopted. +func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { + if s.getState() == streamDone { + return nil + } + s.hdrMu.Lock() + // TODO(mmukhi): Benchmark if the performance gets better if count the metadata and other header fields + // first and create a slice of that exact size. + headerFields := make([]hpack.HeaderField, 0, 2) // grpc-status and grpc-message will be there if none else. + if !s.updateHeaderSent() { // No headers have been sent. + if len(s.header) > 0 { // Send a separate header frame. + t.writeHeaderLocked(s) + } else { // Send a trailer only response. + headerFields = append(headerFields, hpack.HeaderField{Name: ":status", Value: "200"}) + headerFields = append(headerFields, hpack.HeaderField{Name: "content-type", Value: contentType(s.contentSubtype)}) + } + } + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status", Value: strconv.Itoa(int(st.Code()))}) + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) + + if p := st.Proto(); p != nil && len(p.Details) > 0 { + stBytes, err := proto.Marshal(p) + if err != nil { + // TODO: return error instead, when callers are able to handle it. + panic(err) + } + + headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + } + + // Attach the trailer metadata. + headerFields = appendHeaderFieldsFromMD(headerFields, s.trailer) + trailingHeader := &headerFrame{ + streamID: s.id, + hf: headerFields, + endStream: true, + onWrite: func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + }, + } + s.hdrMu.Unlock() + t.closeStream(s, false, 0, trailingHeader, true) + if t.stats != nil { + t.stats.HandleRPC(s.Context(), &stats.OutTrailer{}) + } + return nil +} + +// Write converts the data into HTTP2 data frame and sends it out. Non-nil error +// is returns if it fails (e.g., framing error, transport error). +func (t *http2Server) Write(s *Stream, hdr []byte, data []byte, opts *Options) error { + if !s.isHeaderSent() { // Headers haven't been written yet. + if err := t.WriteHeader(s, nil); err != nil { + // TODO(mmukhi, dfawley): Make sure this is the right code to return. + return streamErrorf(codes.Internal, "transport: %v", err) + } + } else { + // Writing headers checks for this condition. + if s.getState() == streamDone { + // TODO(mmukhi, dfawley): Should the server write also return io.EOF? + s.cancel() + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + } + // Add some data to header frame so that we can equally distribute bytes across frames. + emptyLen := http2MaxFrameLen - len(hdr) + if emptyLen > len(data) { + emptyLen = len(data) + } + hdr = append(hdr, data[:emptyLen]...) + data = data[emptyLen:] + df := &dataFrame{ + streamID: s.id, + h: hdr, + d: data, + onEachWrite: func() { + atomic.StoreUint32(&t.resetPingStrikes, 1) + }, + } + if err := s.wq.get(int32(len(hdr) + len(data))); err != nil { + select { + case <-t.ctx.Done(): + return ErrConnClosing + default: + } + return ContextErr(s.ctx.Err()) + } + return t.controlBuf.put(df) +} + +// keepalive running in a separate goroutine does the following: +// 1. Gracefully closes an idle connection after a duration of keepalive.MaxConnectionIdle. +// 2. Gracefully closes any connection after a duration of keepalive.MaxConnectionAge. +// 3. 
Forcibly closes a connection after an additive period of keepalive.MaxConnectionAgeGrace over keepalive.MaxConnectionAge. +// 4. Makes sure a connection is alive by sending pings with a frequency of keepalive.Time and closes a non-responsive connection +// after an additional duration of keepalive.Timeout. +func (t *http2Server) keepalive() { + p := &ping{} + var pingSent bool + maxIdle := time.NewTimer(t.kp.MaxConnectionIdle) + maxAge := time.NewTimer(t.kp.MaxConnectionAge) + keepalive := time.NewTimer(t.kp.Time) + // NOTE: All exit paths of this function should reset their + // respective timers. A failure to do so will cause the + // following clean-up to deadlock and eventually leak. + defer func() { + if !maxIdle.Stop() { + <-maxIdle.C + } + if !maxAge.Stop() { + <-maxAge.C + } + if !keepalive.Stop() { + <-keepalive.C + } + }() + for { + select { + case <-maxIdle.C: + t.mu.Lock() + idle := t.idle + if idle.IsZero() { // The connection is non-idle. + t.mu.Unlock() + maxIdle.Reset(t.kp.MaxConnectionIdle) + continue + } + val := t.kp.MaxConnectionIdle - time.Since(idle) + t.mu.Unlock() + if val <= 0 { + // The connection has been idle for a duration of keepalive.MaxConnectionIdle or more. + // Gracefully close the connection. + t.drain(http2.ErrCodeNo, []byte{}) + // Resetting the timer so that the clean-up doesn't deadlock. + maxIdle.Reset(infinity) + return + } + maxIdle.Reset(val) + case <-maxAge.C: + t.drain(http2.ErrCodeNo, []byte{}) + maxAge.Reset(t.kp.MaxConnectionAgeGrace) + select { + case <-maxAge.C: + // Close the connection after grace period. + t.Close() + // Resetting the timer so that the clean-up doesn't deadlock. + maxAge.Reset(infinity) + case <-t.ctx.Done(): + } + return + case <-keepalive.C: + if atomic.CompareAndSwapUint32(&t.activity, 1, 0) { + pingSent = false + keepalive.Reset(t.kp.Time) + continue + } + if pingSent { + t.Close() + // Resetting the timer so that the clean-up doesn't deadlock. + keepalive.Reset(infinity) + return + } + pingSent = true + if channelz.IsOn() { + t.czmu.Lock() + t.kpCount++ + t.czmu.Unlock() + } + t.controlBuf.put(p) + keepalive.Reset(t.kp.Timeout) + case <-t.ctx.Done(): + return + } + } +} + +// Close starts shutting down the http2Server transport. +// TODO(zhaoq): Now the destruction is not blocked on any pending streams. This +// could cause some resource issue. Revisit this later. +func (t *http2Server) Close() error { + t.mu.Lock() + if t.state == closing { + t.mu.Unlock() + return errors.New("transport: Close() was already called") + } + t.state = closing + streams := t.activeStreams + t.activeStreams = nil + t.mu.Unlock() + t.controlBuf.finish() + t.cancel() + err := t.conn.Close() + if channelz.IsOn() { + channelz.RemoveEntry(t.channelzID) + } + // Cancel all active streams. + for _, s := range streams { + s.cancel() + } + if t.stats != nil { + connEnd := &stats.ConnEnd{} + t.stats.HandleConn(t.ctx, connEnd) + } + return err +} + +// closeStream clears the footprint of a stream when the stream is not needed +// any more. +func (t *http2Server) closeStream(s *Stream, rst bool, rstCode http2.ErrCode, hdr *headerFrame, eosReceived bool) { + if s.swapState(streamDone) == streamDone { + // If the stream was already done, return. + return + } + // In case stream sending and receiving are invoked in separate + // goroutines (e.g., bi-directional streaming), cancel needs to be + // called to interrupt the potential blocking on other goroutines. 
+ s.cancel() + cleanup := &cleanupStream{ + streamID: s.id, + rst: rst, + rstCode: rstCode, + onWrite: func() { + t.mu.Lock() + if t.activeStreams != nil { + delete(t.activeStreams, s.id) + if len(t.activeStreams) == 0 { + t.idle = time.Now() + } + } + t.mu.Unlock() + if channelz.IsOn() { + t.czmu.Lock() + if eosReceived { + t.streamsSucceeded++ + } else { + t.streamsFailed++ + } + t.czmu.Unlock() + } + }, + } + if hdr != nil { + hdr.cleanup = cleanup + t.controlBuf.put(hdr) + } else { + t.controlBuf.put(cleanup) + } +} + +func (t *http2Server) RemoteAddr() net.Addr { + return t.remoteAddr +} + +func (t *http2Server) Drain() { + t.drain(http2.ErrCodeNo, []byte{}) +} + +func (t *http2Server) drain(code http2.ErrCode, debugData []byte) { + t.mu.Lock() + defer t.mu.Unlock() + if t.drainChan != nil { + return + } + t.drainChan = make(chan struct{}) + t.controlBuf.put(&goAway{code: code, debugData: debugData, headsUp: true}) +} + +var goAwayPing = &ping{data: [8]byte{1, 6, 1, 8, 0, 3, 3, 9}} + +// Handles outgoing GoAway and returns true if loopy needs to put itself +// in draining mode. +func (t *http2Server) outgoingGoAwayHandler(g *goAway) (bool, error) { + t.mu.Lock() + if t.state == closing { // TODO(mmukhi): This seems unnecessary. + t.mu.Unlock() + // The transport is closing. + return false, ErrConnClosing + } + sid := t.maxStreamID + if !g.headsUp { + // Stop accepting more streams now. + t.state = draining + if len(t.activeStreams) == 0 { + g.closeConn = true + } + t.mu.Unlock() + if err := t.framer.fr.WriteGoAway(sid, g.code, g.debugData); err != nil { + return false, err + } + if g.closeConn { + // Abruptly close the connection following the GoAway (via + // loopywriter). But flush out what's inside the buffer first. + t.framer.writer.Flush() + return false, fmt.Errorf("transport: Connection closing") + } + return true, nil + } + t.mu.Unlock() + // For a graceful close, send out a GoAway with stream ID of MaxUInt32, + // Follow that with a ping and wait for the ack to come back or a timer + // to expire. During this time accept new streams since they might have + // originated before the GoAway reaches the client. + // After getting the ack or timer expiration send out another GoAway this + // time with an ID of the max stream server intends to process. 
+ if err := t.framer.fr.WriteGoAway(math.MaxUint32, http2.ErrCodeNo, []byte{}); err != nil { + return false, err + } + if err := t.framer.fr.WritePing(false, goAwayPing.data); err != nil { + return false, err + } + go func() { + timer := time.NewTimer(time.Minute) + defer timer.Stop() + select { + case <-t.drainChan: + case <-timer.C: + case <-t.ctx.Done(): + return + } + t.controlBuf.put(&goAway{code: g.code, debugData: g.debugData}) + }() + return false, nil +} + +func (t *http2Server) ChannelzMetric() *channelz.SocketInternalMetric { + t.czmu.RLock() + s := channelz.SocketInternalMetric{ + StreamsStarted: t.streamsStarted, + StreamsSucceeded: t.streamsSucceeded, + StreamsFailed: t.streamsFailed, + MessagesSent: t.msgSent, + MessagesReceived: t.msgRecv, + KeepAlivesSent: t.kpCount, + LastRemoteStreamCreatedTimestamp: t.lastStreamCreated, + LastMessageSentTimestamp: t.lastMsgSent, + LastMessageReceivedTimestamp: t.lastMsgRecv, + LocalFlowControlWindow: int64(t.fc.getSize()), + //socket options + LocalAddr: t.localAddr, + RemoteAddr: t.remoteAddr, + // Security + // RemoteName : + } + t.czmu.RUnlock() + s.RemoteFlowControlWindow = t.getOutFlowWindow() + return &s +} + +func (t *http2Server) IncrMsgSent() { + t.czmu.Lock() + t.msgSent++ + t.lastMsgSent = time.Now() + t.czmu.Unlock() +} + +func (t *http2Server) IncrMsgRecv() { + t.czmu.Lock() + t.msgRecv++ + t.lastMsgRecv = time.Now() + t.czmu.Unlock() +} + +func (t *http2Server) getOutFlowWindow() int64 { + resp := make(chan uint32) + timer := time.NewTimer(time.Second) + defer timer.Stop() + t.controlBuf.put(&outFlowControlSizeRequest{resp}) + select { + case sz := <-resp: + return int64(sz) + case <-t.ctxDone: + return -1 + case <-timer.C: + return -2 + } +} + +var rgen = rand.New(rand.NewSource(time.Now().UnixNano())) + +func getJitter(v time.Duration) time.Duration { + if v == infinity { + return 0 + } + // Generate a jitter between +/- 10% of the value. + r := int64(v / 10) + j := rgen.Int63n(2*r) - r + return time.Duration(j) +} diff --git a/vendor/google.golang.org/grpc/transport/http_util.go b/vendor/google.golang.org/grpc/transport/http_util.go new file mode 100644 index 00000000000..835c8126946 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/http_util.go @@ -0,0 +1,578 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package transport + +import ( + "bufio" + "bytes" + "encoding/base64" + "fmt" + "net" + "net/http" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/http2" + "golang.org/x/net/http2/hpack" + spb "google.golang.org/genproto/googleapis/rpc/status" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // http2MaxFrameLen specifies the max length of a HTTP2 frame. + http2MaxFrameLen = 16384 // 16KB frame + // http://http2.github.io/http2-spec/#SettingValues + http2InitHeaderTableSize = 4096 + // http2IOBufSize specifies the buffer size for sending frames. 
+ defaultWriteBufSize = 32 * 1024 + defaultReadBufSize = 32 * 1024 + // baseContentType is the base content-type for gRPC. This is a valid + // content-type on it's own, but can also include a content-subtype such as + // "proto" as a suffix after "+" or ";". See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. + baseContentType = "application/grpc" +) + +var ( + clientPreface = []byte(http2.ClientPreface) + http2ErrConvTab = map[http2.ErrCode]codes.Code{ + http2.ErrCodeNo: codes.Internal, + http2.ErrCodeProtocol: codes.Internal, + http2.ErrCodeInternal: codes.Internal, + http2.ErrCodeFlowControl: codes.ResourceExhausted, + http2.ErrCodeSettingsTimeout: codes.Internal, + http2.ErrCodeStreamClosed: codes.Internal, + http2.ErrCodeFrameSize: codes.Internal, + http2.ErrCodeRefusedStream: codes.Unavailable, + http2.ErrCodeCancel: codes.Canceled, + http2.ErrCodeCompression: codes.Internal, + http2.ErrCodeConnect: codes.Internal, + http2.ErrCodeEnhanceYourCalm: codes.ResourceExhausted, + http2.ErrCodeInadequateSecurity: codes.PermissionDenied, + http2.ErrCodeHTTP11Required: codes.Internal, + } + statusCodeConvTab = map[codes.Code]http2.ErrCode{ + codes.Internal: http2.ErrCodeInternal, + codes.Canceled: http2.ErrCodeCancel, + codes.Unavailable: http2.ErrCodeRefusedStream, + codes.ResourceExhausted: http2.ErrCodeEnhanceYourCalm, + codes.PermissionDenied: http2.ErrCodeInadequateSecurity, + } + httpStatusConvTab = map[int]codes.Code{ + // 400 Bad Request - INTERNAL. + http.StatusBadRequest: codes.Internal, + // 401 Unauthorized - UNAUTHENTICATED. + http.StatusUnauthorized: codes.Unauthenticated, + // 403 Forbidden - PERMISSION_DENIED. + http.StatusForbidden: codes.PermissionDenied, + // 404 Not Found - UNIMPLEMENTED. + http.StatusNotFound: codes.Unimplemented, + // 429 Too Many Requests - UNAVAILABLE. + http.StatusTooManyRequests: codes.Unavailable, + // 502 Bad Gateway - UNAVAILABLE. + http.StatusBadGateway: codes.Unavailable, + // 503 Service Unavailable - UNAVAILABLE. + http.StatusServiceUnavailable: codes.Unavailable, + // 504 Gateway timeout - UNAVAILABLE. + http.StatusGatewayTimeout: codes.Unavailable, + } +) + +// Records the states during HPACK decoding. Must be reset once the +// decoding of the entire headers are finished. +type decodeState struct { + encoding string + // statusGen caches the stream status received from the trailer the server + // sent. Client side only. Do not access directly. After all trailers are + // parsed, use the status method to retrieve the status. + statusGen *status.Status + // rawStatusCode and rawStatusMsg are set from the raw trailer fields and are not + // intended for direct access outside of parsing. + rawStatusCode *int + rawStatusMsg string + httpStatus *int + // Server side only fields. + timeoutSet bool + timeout time.Duration + method string + // key-value metadata map from the peer. + mdata map[string][]string + statsTags []byte + statsTrace []byte + contentSubtype string +} + +// isReservedHeader checks whether hdr belongs to HTTP2 headers +// reserved by gRPC protocol. Any other headers are classified as the +// user-specified metadata. 
+func isReservedHeader(hdr string) bool { + if hdr != "" && hdr[0] == ':' { + return true + } + switch hdr { + case "content-type", + "user-agent", + "grpc-message-type", + "grpc-encoding", + "grpc-message", + "grpc-status", + "grpc-timeout", + "grpc-status-details-bin", + "te": + return true + default: + return false + } +} + +// isWhitelistedHeader checks whether hdr should be propagated +// into metadata visible to users. +func isWhitelistedHeader(hdr string) bool { + switch hdr { + case ":authority", "user-agent": + return true + default: + return false + } +} + +// contentSubtype returns the content-subtype for the given content-type. The +// given content-type must be a valid content-type that starts with +// "application/grpc". A content-subtype will follow "application/grpc" after a +// "+" or ";". See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +// +// If contentType is not a valid content-type for gRPC, the boolean +// will be false, otherwise true. If content-type == "application/grpc", +// "application/grpc+", or "application/grpc;", the boolean will be true, +// but no content-subtype will be returned. +// +// contentType is assumed to be lowercase already. +func contentSubtype(contentType string) (string, bool) { + if contentType == baseContentType { + return "", true + } + if !strings.HasPrefix(contentType, baseContentType) { + return "", false + } + // guaranteed since != baseContentType and has baseContentType prefix + switch contentType[len(baseContentType)] { + case '+', ';': + // this will return true for "application/grpc+" or "application/grpc;" + // which the previous validContentType function tested to be valid, so we + // just say that no content-subtype is specified in this case + return contentType[len(baseContentType)+1:], true + default: + return "", false + } +} + +// contentSubtype is assumed to be lowercase +func contentType(contentSubtype string) string { + if contentSubtype == "" { + return baseContentType + } + return baseContentType + "+" + contentSubtype +} + +func (d *decodeState) status() *status.Status { + if d.statusGen == nil { + // No status-details were provided; generate status using code/msg. + d.statusGen = status.New(codes.Code(int32(*(d.rawStatusCode))), d.rawStatusMsg) + } + return d.statusGen +} + +const binHdrSuffix = "-bin" + +func encodeBinHeader(v []byte) string { + return base64.RawStdEncoding.EncodeToString(v) +} + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +func encodeMetadataHeader(k, v string) string { + if strings.HasSuffix(k, binHdrSuffix) { + return encodeBinHeader(([]byte)(v)) + } + return v +} + +func decodeMetadataHeader(k, v string) (string, error) { + if strings.HasSuffix(k, binHdrSuffix) { + b, err := decodeBinHeader(v) + return string(b), err + } + return v, nil +} + +func (d *decodeState) decodeResponseHeader(frame *http2.MetaHeadersFrame) error { + for _, hf := range frame.Fields { + if err := d.processHeaderField(hf); err != nil { + return err + } + } + + // If grpc status exists, no need to check further. + if d.rawStatusCode != nil || d.statusGen != nil { + return nil + } + + // If grpc status doesn't exist and http status doesn't exist, + // then it's a malformed header. 
+ if d.httpStatus == nil { + return streamErrorf(codes.Internal, "malformed header: doesn't contain status(gRPC or HTTP)") + } + + if *(d.httpStatus) != http.StatusOK { + code, ok := httpStatusConvTab[*(d.httpStatus)] + if !ok { + code = codes.Unknown + } + return streamErrorf(code, http.StatusText(*(d.httpStatus))) + } + + // gRPC status doesn't exist and http status is OK. + // Set rawStatusCode to be unknown and return nil error. + // So that, if the stream has ended this Unknown status + // will be propagated to the user. + // Otherwise, it will be ignored. In which case, status from + // a later trailer, that has StreamEnded flag set, is propagated. + code := int(codes.Unknown) + d.rawStatusCode = &code + return nil + +} + +func (d *decodeState) addMetadata(k, v string) { + if d.mdata == nil { + d.mdata = make(map[string][]string) + } + d.mdata[k] = append(d.mdata[k], v) +} + +func (d *decodeState) processHeaderField(f hpack.HeaderField) error { + switch f.Name { + case "content-type": + contentSubtype, validContentType := contentSubtype(f.Value) + if !validContentType { + return streamErrorf(codes.Internal, "transport: received the unexpected content-type %q", f.Value) + } + d.contentSubtype = contentSubtype + // TODO: do we want to propagate the whole content-type in the metadata, + // or come up with a way to just propagate the content-subtype if it was set? + // ie {"content-type": "application/grpc+proto"} or {"content-subtype": "proto"} + // in the metadata? + d.addMetadata(f.Name, f.Value) + case "grpc-encoding": + d.encoding = f.Value + case "grpc-status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status: %v", err) + } + d.rawStatusCode = &code + case "grpc-message": + d.rawStatusMsg = decodeGrpcMessage(f.Value) + case "grpc-status-details-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + s := &spb.Status{} + if err := proto.Unmarshal(v, s); err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-status-details-bin: %v", err) + } + d.statusGen = status.FromProto(s) + case "grpc-timeout": + d.timeoutSet = true + var err error + if d.timeout, err = decodeTimeout(f.Value); err != nil { + return streamErrorf(codes.Internal, "transport: malformed time-out: %v", err) + } + case ":path": + d.method = f.Value + case ":status": + code, err := strconv.Atoi(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed http-status: %v", err) + } + d.httpStatus = &code + case "grpc-tags-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-tags-bin: %v", err) + } + d.statsTags = v + d.addMetadata(f.Name, string(v)) + case "grpc-trace-bin": + v, err := decodeBinHeader(f.Value) + if err != nil { + return streamErrorf(codes.Internal, "transport: malformed grpc-trace-bin: %v", err) + } + d.statsTrace = v + d.addMetadata(f.Name, string(v)) + default: + if isReservedHeader(f.Name) && !isWhitelistedHeader(f.Name) { + break + } + v, err := decodeMetadataHeader(f.Name, f.Value) + if err != nil { + errorf("Failed to decode metadata header (%q, %q): %v", f.Name, f.Value, err) + return nil + } + d.addMetadata(f.Name, v) + } + return nil +} + +type timeoutUnit uint8 + +const ( + hour timeoutUnit = 'H' + minute timeoutUnit = 'M' + second timeoutUnit = 'S' + millisecond timeoutUnit = 'm' + microsecond 
timeoutUnit = 'u' + nanosecond timeoutUnit = 'n' +) + +func timeoutUnitToDuration(u timeoutUnit) (d time.Duration, ok bool) { + switch u { + case hour: + return time.Hour, true + case minute: + return time.Minute, true + case second: + return time.Second, true + case millisecond: + return time.Millisecond, true + case microsecond: + return time.Microsecond, true + case nanosecond: + return time.Nanosecond, true + default: + } + return +} + +const maxTimeoutValue int64 = 100000000 - 1 + +// div does integer division and round-up the result. Note that this is +// equivalent to (d+r-1)/r but has less chance to overflow. +func div(d, r time.Duration) int64 { + if m := d % r; m > 0 { + return int64(d/r + 1) + } + return int64(d / r) +} + +// TODO(zhaoq): It is the simplistic and not bandwidth efficient. Improve it. +func encodeTimeout(t time.Duration) string { + if t <= 0 { + return "0n" + } + if d := div(t, time.Nanosecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "n" + } + if d := div(t, time.Microsecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "u" + } + if d := div(t, time.Millisecond); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "m" + } + if d := div(t, time.Second); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "S" + } + if d := div(t, time.Minute); d <= maxTimeoutValue { + return strconv.FormatInt(d, 10) + "M" + } + // Note that maxTimeoutValue * time.Hour > MaxInt64. + return strconv.FormatInt(div(t, time.Hour), 10) + "H" +} + +func decodeTimeout(s string) (time.Duration, error) { + size := len(s) + if size < 2 { + return 0, fmt.Errorf("transport: timeout string is too short: %q", s) + } + unit := timeoutUnit(s[size-1]) + d, ok := timeoutUnitToDuration(unit) + if !ok { + return 0, fmt.Errorf("transport: timeout unit is not recognized: %q", s) + } + t, err := strconv.ParseInt(s[:size-1], 10, 64) + if err != nil { + return 0, err + } + return d * time.Duration(t), nil +} + +const ( + spaceByte = ' ' + tildaByte = '~' + percentByte = '%' +) + +// encodeGrpcMessage is used to encode status code in header field +// "grpc-message". +// It checks to see if each individual byte in msg is an +// allowable byte, and then either percent encoding or passing it through. +// When percent encoding, the byte is converted into hexadecimal notation +// with a '%' prepended. +func encodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if !(c >= spaceByte && c < tildaByte && c != percentByte) { + return encodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func encodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c >= spaceByte && c < tildaByte && c != percentByte { + buf.WriteByte(c) + } else { + buf.WriteString(fmt.Sprintf("%%%02X", c)) + } + } + return buf.String() +} + +// decodeGrpcMessage decodes the msg encoded by encodeGrpcMessage. 
+func decodeGrpcMessage(msg string) string { + if msg == "" { + return "" + } + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + if msg[i] == percentByte && i+2 < lenMsg { + return decodeGrpcMessageUnchecked(msg) + } + } + return msg +} + +func decodeGrpcMessageUnchecked(msg string) string { + var buf bytes.Buffer + lenMsg := len(msg) + for i := 0; i < lenMsg; i++ { + c := msg[i] + if c == percentByte && i+2 < lenMsg { + parsed, err := strconv.ParseUint(msg[i+1:i+3], 16, 8) + if err != nil { + buf.WriteByte(c) + } else { + buf.WriteByte(byte(parsed)) + i += 2 + } + } else { + buf.WriteByte(c) + } + } + return buf.String() +} + +type bufWriter struct { + buf []byte + offset int + batchSize int + conn net.Conn + err error + + onFlush func() +} + +func newBufWriter(conn net.Conn, batchSize int) *bufWriter { + return &bufWriter{ + buf: make([]byte, batchSize*2), + batchSize: batchSize, + conn: conn, + } +} + +func (w *bufWriter) Write(b []byte) (n int, err error) { + if w.err != nil { + return 0, w.err + } + for len(b) > 0 { + nn := copy(w.buf[w.offset:], b) + b = b[nn:] + w.offset += nn + n += nn + if w.offset >= w.batchSize { + err = w.Flush() + } + } + return n, err +} + +func (w *bufWriter) Flush() error { + if w.err != nil { + return w.err + } + if w.offset == 0 { + return nil + } + if w.onFlush != nil { + w.onFlush() + } + _, w.err = w.conn.Write(w.buf[:w.offset]) + w.offset = 0 + return w.err +} + +type framer struct { + writer *bufWriter + fr *http2.Framer +} + +func newFramer(conn net.Conn, writeBufferSize, readBufferSize int) *framer { + r := bufio.NewReaderSize(conn, readBufferSize) + w := newBufWriter(conn, writeBufferSize) + f := &framer{ + writer: w, + fr: http2.NewFramer(w, r), + } + // Opt-in to Frame reuse API on framer to reduce garbage. + // Frames aren't safe to read from after a subsequent call to ReadFrame. + f.fr.SetReuseFrames() + f.fr.ReadMetaHeaders = hpack.NewDecoder(http2InitHeaderTableSize, nil) + return f +} diff --git a/vendor/google.golang.org/grpc/transport/log.go b/vendor/google.golang.org/grpc/transport/log.go new file mode 100644 index 00000000000..ac8e358c5c8 --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/log.go @@ -0,0 +1,50 @@ +/* + * + * Copyright 2017 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// This file contains wrappers for grpclog functions. +// The transport package only logs to verbose level 2 by default. + +package transport + +import "google.golang.org/grpc/grpclog" + +const logLevel = 2 + +func infof(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Infof(format, args...) + } +} + +func warningf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Warningf(format, args...) + } +} + +func errorf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Errorf(format, args...) + } +} + +func fatalf(format string, args ...interface{}) { + if grpclog.V(logLevel) { + grpclog.Fatalf(format, args...) 
+ } +} diff --git a/vendor/google.golang.org/grpc/transport/transport.go b/vendor/google.golang.org/grpc/transport/transport.go new file mode 100644 index 00000000000..f51f878884d --- /dev/null +++ b/vendor/google.golang.org/grpc/transport/transport.go @@ -0,0 +1,708 @@ +/* + * + * Copyright 2014 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +// Package transport defines and implements message oriented communication +// channel to complete various transactions (e.g., an RPC). It is meant for +// grpc-internal usage and is not intended to be imported directly by users. +package transport // externally used as import "google.golang.org/grpc/transport" + +import ( + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + + "golang.org/x/net/context" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/stats" + "google.golang.org/grpc/status" + "google.golang.org/grpc/tap" +) + +// recvMsg represents the received msg from the transport. All transport +// protocol specific info has been removed. +type recvMsg struct { + data []byte + // nil: received some data + // io.EOF: stream is completed. data is nil. + // other non-nil error: transport failure. data is nil. + err error +} + +// recvBuffer is an unbounded channel of recvMsg structs. +// Note recvBuffer differs from controlBuffer only in that recvBuffer +// holds a channel of only recvMsg structs instead of objects implementing "item" interface. +// recvBuffer is written to much more often than +// controlBuffer and using strict recvMsg structs helps avoid allocation in "recvBuffer.put" +type recvBuffer struct { + c chan recvMsg + mu sync.Mutex + backlog []recvMsg + err error +} + +func newRecvBuffer() *recvBuffer { + b := &recvBuffer{ + c: make(chan recvMsg, 1), + } + return b +} + +func (b *recvBuffer) put(r recvMsg) { + b.mu.Lock() + if b.err != nil { + b.mu.Unlock() + // An error had occurred earlier, don't accept more + // data or errors. + return + } + b.err = r.err + if len(b.backlog) == 0 { + select { + case b.c <- r: + b.mu.Unlock() + return + default: + } + } + b.backlog = append(b.backlog, r) + b.mu.Unlock() +} + +func (b *recvBuffer) load() { + b.mu.Lock() + if len(b.backlog) > 0 { + select { + case b.c <- b.backlog[0]: + b.backlog[0] = recvMsg{} + b.backlog = b.backlog[1:] + default: + } + } + b.mu.Unlock() +} + +// get returns the channel that receives a recvMsg in the buffer. +// +// Upon receipt of a recvMsg, the caller should call load to send another +// recvMsg onto the channel if there is any. +func (b *recvBuffer) get() <-chan recvMsg { + return b.c +} + +// +// recvBufferReader implements io.Reader interface to read the data from +// recvBuffer. +type recvBufferReader struct { + ctx context.Context + ctxDone <-chan struct{} // cache of ctx.Done() (for performance). + recv *recvBuffer + last []byte // Stores the remaining data in the previous calls. 
+ err error +} + +// Read reads the next len(p) bytes from last. If last is drained, it tries to +// read additional data from recv. It blocks if there no additional data available +// in recv. If Read returns any non-nil error, it will continue to return that error. +func (r *recvBufferReader) Read(p []byte) (n int, err error) { + if r.err != nil { + return 0, r.err + } + n, r.err = r.read(p) + return n, r.err +} + +func (r *recvBufferReader) read(p []byte) (n int, err error) { + if r.last != nil && len(r.last) > 0 { + // Read remaining data left in last call. + copied := copy(p, r.last) + r.last = r.last[copied:] + return copied, nil + } + select { + case <-r.ctxDone: + return 0, ContextErr(r.ctx.Err()) + case m := <-r.recv.get(): + r.recv.load() + if m.err != nil { + return 0, m.err + } + copied := copy(p, m.data) + r.last = m.data[copied:] + return copied, nil + } +} + +type streamState uint32 + +const ( + streamActive streamState = iota + streamWriteDone // EndStream sent + streamReadDone // EndStream received + streamDone // the entire stream is finished. +) + +// Stream represents an RPC in the transport layer. +type Stream struct { + id uint32 + st ServerTransport // nil for client side Stream + ctx context.Context // the associated context of the stream + cancel context.CancelFunc // always nil for client side Stream + done chan struct{} // closed at the end of stream to unblock writers. On the client side. + ctxDone <-chan struct{} // same as done chan but for server side. Cache of ctx.Done() (for performance) + method string // the associated RPC method of the stream + recvCompress string + sendCompress string + buf *recvBuffer + trReader io.Reader + fc *inFlow + recvQuota uint32 + wq *writeQuota + + // Callback to state application's intentions to read data. This + // is used to adjust flow control, if needed. + requestRead func(int) + + headerChan chan struct{} // closed to indicate the end of header metadata. + headerDone uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times. + + // hdrMu protects header and trailer metadata on the server-side. + hdrMu sync.Mutex + header metadata.MD // the received header metadata. + trailer metadata.MD // the key-value map of trailer metadata. + + // On the server-side, headerSent is atomically set to 1 when the headers are sent out. + headerSent uint32 + + state streamState + + // On client-side it is the status error received from the server. + // On server-side it is unused. + status *status.Status + + bytesReceived uint32 // indicates whether any bytes have been received on this stream + unprocessed uint32 // set if the server sends a refused stream or GOAWAY including this stream + + // contentSubtype is the content-subtype for requests. + // this must be lowercase or the behavior is undefined. + contentSubtype string +} + +// isHeaderSent is only valid on the server-side. +func (s *Stream) isHeaderSent() bool { + return atomic.LoadUint32(&s.headerSent) == 1 +} + +// updateHeaderSent updates headerSent and returns true +// if it was alreay set. It is valid only on server-side. 
+func (s *Stream) updateHeaderSent() bool { + return atomic.SwapUint32(&s.headerSent, 1) == 1 +} + +func (s *Stream) swapState(st streamState) streamState { + return streamState(atomic.SwapUint32((*uint32)(&s.state), uint32(st))) +} + +func (s *Stream) compareAndSwapState(oldState, newState streamState) bool { + return atomic.CompareAndSwapUint32((*uint32)(&s.state), uint32(oldState), uint32(newState)) +} + +func (s *Stream) getState() streamState { + return streamState(atomic.LoadUint32((*uint32)(&s.state))) +} + +func (s *Stream) waitOnHeader() error { + if s.headerChan == nil { + // On the server headerChan is always nil since a stream originates + // only after having received headers. + return nil + } + select { + case <-s.ctx.Done(): + return ContextErr(s.ctx.Err()) + case <-s.headerChan: + return nil + } +} + +// RecvCompress returns the compression algorithm applied to the inbound +// message. It is empty string if there is no compression applied. +func (s *Stream) RecvCompress() string { + if err := s.waitOnHeader(); err != nil { + return "" + } + return s.recvCompress +} + +// SetSendCompress sets the compression algorithm to the stream. +func (s *Stream) SetSendCompress(str string) { + s.sendCompress = str +} + +// Done returns a chanel which is closed when it receives the final status +// from the server. +func (s *Stream) Done() <-chan struct{} { + return s.done +} + +// Header acquires the key-value pairs of header metadata once it +// is available. It blocks until i) the metadata is ready or ii) there is no +// header metadata or iii) the stream is canceled/expired. +func (s *Stream) Header() (metadata.MD, error) { + err := s.waitOnHeader() + // Even if the stream is closed, header is returned if available. + select { + case <-s.headerChan: + if s.header == nil { + return nil, nil + } + return s.header.Copy(), nil + default: + } + return nil, err +} + +// Trailer returns the cached trailer metedata. Note that if it is not called +// after the entire stream is done, it could return an empty MD. Client +// side only. +// It can be safely read only after stream has ended that is either read +// or write have returned io.EOF. +func (s *Stream) Trailer() metadata.MD { + c := s.trailer.Copy() + return c +} + +// ServerTransport returns the underlying ServerTransport for the stream. +// The client side stream always returns nil. +func (s *Stream) ServerTransport() ServerTransport { + return s.st +} + +// ContentSubtype returns the content-subtype for a request. For example, a +// content-subtype of "proto" will result in a content-type of +// "application/grpc+proto". This will always be lowercase. See +// https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests for +// more details. +func (s *Stream) ContentSubtype() string { + return s.contentSubtype +} + +// Context returns the context of the stream. +func (s *Stream) Context() context.Context { + return s.ctx +} + +// Method returns the method for the stream. +func (s *Stream) Method() string { + return s.method +} + +// Status returns the status received from the server. +// Status can be read safely only after the stream has ended, +// that is, read or write has returned io.EOF. +func (s *Stream) Status() *status.Status { + return s.status +} + +// SetHeader sets the header metadata. This can be called multiple times. +// Server side only. +// This should not be called in parallel to other data writes. 
+func (s *Stream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.isHeaderSent() || s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.header = metadata.Join(s.header, md) + s.hdrMu.Unlock() + return nil +} + +// SendHeader sends the given header metadata. The given metadata is +// combined with any metadata set by previous calls to SetHeader and +// then written to the transport stream. +func (s *Stream) SendHeader(md metadata.MD) error { + t := s.ServerTransport() + return t.WriteHeader(s, md) +} + +// SetTrailer sets the trailer metadata which will be sent with the RPC status +// by the server. This can be called multiple times. Server side only. +// This should not be called parallel to other data writes. +func (s *Stream) SetTrailer(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + if s.getState() == streamDone { + return ErrIllegalHeaderWrite + } + s.hdrMu.Lock() + s.trailer = metadata.Join(s.trailer, md) + s.hdrMu.Unlock() + return nil +} + +func (s *Stream) write(m recvMsg) { + s.buf.put(m) +} + +// Read reads all p bytes from the wire for this stream. +func (s *Stream) Read(p []byte) (n int, err error) { + // Don't request a read if there was an error earlier + if er := s.trReader.(*transportReader).er; er != nil { + return 0, er + } + s.requestRead(len(p)) + return io.ReadFull(s.trReader, p) +} + +// tranportReader reads all the data available for this Stream from the transport and +// passes them into the decoder, which converts them into a gRPC message stream. +// The error is io.EOF when the stream is done or another non-nil error if +// the stream broke. +type transportReader struct { + reader io.Reader + // The handler to control the window update procedure for both this + // particular stream and the associated transport. + windowHandler func(int) + er error +} + +func (t *transportReader) Read(p []byte) (n int, err error) { + n, err = t.reader.Read(p) + if err != nil { + t.er = err + return + } + t.windowHandler(n) + return +} + +// BytesReceived indicates whether any bytes have been received on this stream. +func (s *Stream) BytesReceived() bool { + return atomic.LoadUint32(&s.bytesReceived) == 1 +} + +// Unprocessed indicates whether the server did not process this stream -- +// i.e. it sent a refused stream or GOAWAY including this stream ID. +func (s *Stream) Unprocessed() bool { + return atomic.LoadUint32(&s.unprocessed) == 1 +} + +// GoString is implemented by Stream so context.String() won't +// race when printing %#v. +func (s *Stream) GoString() string { + return fmt.Sprintf("", s, s.method) +} + +// state of transport +type transportState int + +const ( + reachable transportState = iota + closing + draining +) + +// ServerConfig consists of all the configurations to establish a server transport. +type ServerConfig struct { + MaxStreams uint32 + AuthInfo credentials.AuthInfo + InTapHandle tap.ServerInHandle + StatsHandler stats.Handler + KeepaliveParams keepalive.ServerParameters + KeepalivePolicy keepalive.EnforcementPolicy + InitialWindowSize int32 + InitialConnWindowSize int32 + WriteBufferSize int + ReadBufferSize int + ChannelzParentID int64 +} + +// NewServerTransport creates a ServerTransport with conn or non-nil error +// if it fails. +func NewServerTransport(protocol string, conn net.Conn, config *ServerConfig) (ServerTransport, error) { + return newHTTP2Server(conn, config) +} + +// ConnectOptions covers all relevant options for communicating with the server. 
+type ConnectOptions struct { + // UserAgent is the application user agent. + UserAgent string + // Authority is the :authority pseudo-header to use. This field has no effect if + // TransportCredentials is set. + Authority string + // Dialer specifies how to dial a network address. + Dialer func(context.Context, string) (net.Conn, error) + // FailOnNonTempDialError specifies if gRPC fails on non-temporary dial errors. + FailOnNonTempDialError bool + // PerRPCCredentials stores the PerRPCCredentials required to issue RPCs. + PerRPCCredentials []credentials.PerRPCCredentials + // TransportCredentials stores the Authenticator required to setup a client connection. + TransportCredentials credentials.TransportCredentials + // KeepaliveParams stores the keepalive parameters. + KeepaliveParams keepalive.ClientParameters + // StatsHandler stores the handler for stats. + StatsHandler stats.Handler + // InitialWindowSize sets the initial window size for a stream. + InitialWindowSize int32 + // InitialConnWindowSize sets the initial window size for a connection. + InitialConnWindowSize int32 + // WriteBufferSize sets the size of write buffer which in turn determines how much data can be batched before it's written on the wire. + WriteBufferSize int + // ReadBufferSize sets the size of read buffer, which in turn determines how much data can be read at most for one read syscall. + ReadBufferSize int + // ChannelzParentID sets the addrConn id which initiate the creation of this client transport. + ChannelzParentID int64 +} + +// TargetInfo contains the information of the target such as network address and metadata. +type TargetInfo struct { + Addr string + Metadata interface{} + Authority string +} + +// NewClientTransport establishes the transport with the required ConnectOptions +// and returns it to the caller. +func NewClientTransport(connectCtx, ctx context.Context, target TargetInfo, opts ConnectOptions, onSuccess func()) (ClientTransport, error) { + return newHTTP2Client(connectCtx, ctx, target, opts, onSuccess) +} + +// Options provides additional hints and information for message +// transmission. +type Options struct { + // Last indicates whether this write is the last piece for + // this stream. + Last bool + + // Delay is a hint to the transport implementation for whether + // the data could be buffered for a batching write. The + // transport implementation may ignore the hint. + Delay bool +} + +// CallHdr carries the information of a particular RPC. +type CallHdr struct { + // Host specifies the peer's host. + Host string + + // Method specifies the operation to perform. + Method string + + // SendCompress specifies the compression algorithm applied on + // outbound message. + SendCompress string + + // Creds specifies credentials.PerRPCCredentials for a call. + Creds credentials.PerRPCCredentials + + // Flush indicates whether a new stream command should be sent + // to the peer without waiting for the first data. This is + // only a hint. + // If it's true, the transport may modify the flush decision + // for performance purposes. + // If it's false, new stream will never be flushed. + Flush bool + + // ContentSubtype specifies the content-subtype for a request. For example, a + // content-subtype of "proto" will result in a content-type of + // "application/grpc+proto". The value of ContentSubtype must be all + // lowercase, otherwise the behavior is undefined. See + // https://github.com/grpc/grpc/blob/master/doc/PROTOCOL-HTTP2.md#requests + // for more details. 
+ ContentSubtype string +} + +// ClientTransport is the common interface for all gRPC client-side transport +// implementations. +type ClientTransport interface { + // Close tears down this transport. Once it returns, the transport + // should not be accessed any more. The caller must make sure this + // is called only once. + Close() error + + // GracefulClose starts to tear down the transport. It stops accepting + // new RPCs and wait the completion of the pending RPCs. + GracefulClose() error + + // Write sends the data for the given stream. A nil stream indicates + // the write is to be performed on the transport as a whole. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // NewStream creates a Stream for an RPC. + NewStream(ctx context.Context, callHdr *CallHdr) (*Stream, error) + + // CloseStream clears the footprint of a stream when the stream is + // not needed any more. The err indicates the error incurred when + // CloseStream is called. Must be called when a stream is finished + // unless the associated transport is closing. + CloseStream(stream *Stream, err error) + + // Error returns a channel that is closed when some I/O error + // happens. Typically the caller should have a goroutine to monitor + // this in order to take action (e.g., close the current transport + // and create a new one) in error case. It should not return nil + // once the transport is initiated. + Error() <-chan struct{} + + // GoAway returns a channel that is closed when ClientTransport + // receives the draining signal from the server (e.g., GOAWAY frame in + // HTTP/2). + GoAway() <-chan struct{} + + // GetGoAwayReason returns the reason why GoAway frame was received. + GetGoAwayReason() GoAwayReason + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// ServerTransport is the common interface for all gRPC server-side transport +// implementations. +// +// Methods may be called concurrently from multiple goroutines, but +// Write methods for a given Stream will be called serially. +type ServerTransport interface { + // HandleStreams receives incoming streams using the given handler. + HandleStreams(func(*Stream), func(context.Context, string) context.Context) + + // WriteHeader sends the header metadata for the given stream. + // WriteHeader may not be called on all streams. + WriteHeader(s *Stream, md metadata.MD) error + + // Write sends the data for the given stream. + // Write may not be called on all streams. + Write(s *Stream, hdr []byte, data []byte, opts *Options) error + + // WriteStatus sends the status of a stream to the client. WriteStatus is + // the final call made on a stream and always occurs. + WriteStatus(s *Stream, st *status.Status) error + + // Close tears down the transport. Once it is called, the transport + // should not be accessed any more. All the pending streams and their + // handlers will be terminated asynchronously. + Close() error + + // RemoteAddr returns the remote network address. + RemoteAddr() net.Addr + + // Drain notifies the client this ServerTransport stops accepting new RPCs. + Drain() + + // IncrMsgSent increments the number of message sent through this transport. + IncrMsgSent() + + // IncrMsgRecv increments the number of message received through this transport. + IncrMsgRecv() +} + +// streamErrorf creates an StreamError with the specified error code and description. 
+func streamErrorf(c codes.Code, format string, a ...interface{}) StreamError { + return StreamError{ + Code: c, + Desc: fmt.Sprintf(format, a...), + } +} + +// connectionErrorf creates an ConnectionError with the specified error description. +func connectionErrorf(temp bool, e error, format string, a ...interface{}) ConnectionError { + return ConnectionError{ + Desc: fmt.Sprintf(format, a...), + temp: temp, + err: e, + } +} + +// ConnectionError is an error that results in the termination of the +// entire connection and the retry of all the active streams. +type ConnectionError struct { + Desc string + temp bool + err error +} + +func (e ConnectionError) Error() string { + return fmt.Sprintf("connection error: desc = %q", e.Desc) +} + +// Temporary indicates if this connection error is temporary or fatal. +func (e ConnectionError) Temporary() bool { + return e.temp +} + +// Origin returns the original error of this connection error. +func (e ConnectionError) Origin() error { + // Never return nil error here. + // If the original error is nil, return itself. + if e.err == nil { + return e + } + return e.err +} + +var ( + // ErrConnClosing indicates that the transport is closing. + ErrConnClosing = connectionErrorf(true, nil, "transport is closing") + // errStreamDrain indicates that the stream is rejected because the + // connection is draining. This could be caused by goaway or balancer + // removing the address. + errStreamDrain = streamErrorf(codes.Unavailable, "the connection is draining") + // errStreamDone is returned from write at the client side to indiacte application + // layer of an error. + errStreamDone = errors.New("the stream is done") + // StatusGoAway indicates that the server sent a GOAWAY that included this + // stream's ID in unprocessed RPCs. + statusGoAway = status.New(codes.Unavailable, "the stream is rejected because server is draining the connection") +) + +// TODO: See if we can replace StreamError with status package errors. + +// StreamError is an error that only affects one stream within a connection. +type StreamError struct { + Code codes.Code + Desc string +} + +func (e StreamError) Error() string { + return fmt.Sprintf("stream error: code = %s desc = %q", e.Code, e.Desc) +} + +// GoAwayReason contains the reason for the GoAway frame received. +type GoAwayReason uint8 + +const ( + // GoAwayInvalid indicates that no GoAway frame is received. + GoAwayInvalid GoAwayReason = 0 + // GoAwayNoReason is the default value when GoAway frame is received. + GoAwayNoReason GoAwayReason = 1 + // GoAwayTooManyPings indicates that a GoAway frame with + // ErrCodeEnhanceYourCalm was received and that the debug data said + // "too_many_pings". + GoAwayTooManyPings GoAwayReason = 2 +) diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh new file mode 100755 index 00000000000..6126ab64bd1 --- /dev/null +++ b/vendor/google.golang.org/grpc/vet.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +if [[ `uname -a` = *"Darwin"* ]]; then + echo "It seems you are running on Mac. This script does not work on Mac. See https://github.com/grpc/grpc-go/issues/2047" + exit 1 +fi + +set -ex # Exit on error; debugging enabled. +set -o pipefail # Fail a pipe if any sub-command fails. + +die() { + echo "$@" >&2 + exit 1 +} + +PATH="$GOPATH/bin:$GOROOT/bin:$PATH" + +# Check proto in manual runs or cron runs. +if [[ "$TRAVIS" != "true" || "$TRAVIS_EVENT_TYPE" = "cron" ]]; then + check_proto="true" +fi + +if [ "$1" = "-install" ]; then + go get -d \ + google.golang.org/grpc/... 
+ go get -u \ + github.com/golang/lint/golint \ + golang.org/x/tools/cmd/goimports \ + honnef.co/go/tools/cmd/staticcheck \ + github.com/client9/misspell/cmd/misspell \ + github.com/golang/protobuf/protoc-gen-go + if [[ "$check_proto" = "true" ]]; then + if [[ "$TRAVIS" = "true" ]]; then + PROTOBUF_VERSION=3.3.0 + PROTOC_FILENAME=protoc-${PROTOBUF_VERSION}-linux-x86_64.zip + pushd /home/travis + wget https://github.com/google/protobuf/releases/download/v${PROTOBUF_VERSION}/${PROTOC_FILENAME} + unzip ${PROTOC_FILENAME} + bin/protoc --version + popd + elif ! which protoc > /dev/null; then + die "Please install protoc into your path" + fi + fi + exit 0 +elif [[ "$#" -ne 0 ]]; then + die "Unknown argument(s): $*" +fi + +# TODO: Remove this check and the mangling below once "context" is imported +# directly. +if git status --porcelain | read; then + die "Uncommitted or untracked files found; commit changes first" +fi + +git ls-files "*.go" | xargs grep -L "\(Copyright [0-9]\{4,\} gRPC authors\)\|DO NOT EDIT" 2>&1 | tee /dev/stderr | (! read) +git ls-files "*.go" | xargs grep -l "\"unsafe\"" 2>&1 | (! grep -v '_test.go') | tee /dev/stderr | (! read) +gofmt -s -d -l . 2>&1 | tee /dev/stderr | (! read) +goimports -l . 2>&1 | tee /dev/stderr | (! read) +golint ./... 2>&1 | (grep -vE "(_mock|\.pb)\.go:" || true) | tee /dev/stderr | (! read) + +# Undo any edits made by this script. +cleanup() { + git reset --hard HEAD +} +trap cleanup EXIT + +# Rewrite golang.org/x/net/context -> context imports (see grpc/grpc-go#1484). +# TODO: Remove this mangling once "context" is imported directly (grpc/grpc-go#711). +git ls-files "*.go" | xargs sed -i 's:"golang.org/x/net/context":"context":' +set +o pipefail +# TODO: Stop filtering pb.go files once golang/protobuf#214 is fixed. +go tool vet -all . 2>&1 | grep -vE '(clientconn|transport\/transport_test).go:.*cancel (function|var)' | grep -vF '.pb.go:' | tee /dev/stderr | (! read) +set -o pipefail +git reset --hard HEAD + +if [[ "$check_proto" = "true" ]]; then + PATH="/home/travis/bin:$PATH" make proto && \ + git status --porcelain 2>&1 | (! read) || \ + (git status; git --no-pager diff; exit 1) +fi + +# TODO(menghanl): fix errors in transport_test. +staticcheck -ignore ' +google.golang.org/grpc/transport/transport_test.go:SA2002 +google.golang.org/grpc/benchmark/benchmain/main.go:SA1019 +google.golang.org/grpc/stats/stats_test.go:SA1019 +google.golang.org/grpc/test/end2end_test.go:SA1019 +google.golang.org/grpc/balancer_test.go:SA1019 +google.golang.org/grpc/balancer.go:SA1019 +google.golang.org/grpc/clientconn_test.go:SA1019 +' ./... +misspell -error . diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go index 3b4afedf1a3..26548b63cef 100644 --- a/vendor/gopkg.in/inf.v0/dec.go +++ b/vendor/gopkg.in/inf.v0/dec.go @@ -104,7 +104,7 @@ var bigInt = [...]*big.Int{ var exp10cache [64]big.Int = func() [64]big.Int { e10, e10i := [64]big.Int{}, bigInt[1] - for i, _ := range e10 { + for i := range e10 { e10[i].Set(e10i) e10i = new(big.Int).Mul(e10i, bigInt[10]) } diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md index 2ed3314c739..b50c6e87755 100644 --- a/vendor/gopkg.in/yaml.v2/README.md +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -48,8 +48,6 @@ The yaml package is licensed under the Apache License 2.0. Please see the LICENS Example ------- -Some more examples can be found in the "examples" folder. 
- ```Go package main diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index 3e24a0d7d27..1f7e87e6727 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -468,7 +468,7 @@ func yaml_event_delete(event *yaml_event_t) { // } context // tag_directive *yaml_tag_directive_t // -// context.error = YAML_NO_ERROR // Eliminate a compliler warning. +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. // // assert(document) // Non-NULL document object is expected. // diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index c8eac164287..e4e56e28e0e 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -113,6 +113,10 @@ func (p *parser) fail() { var line int if p.parser.problem_mark.line != 0 { line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } } else if p.parser.context_mark.line != 0 { line = p.parser.context_mark.line } @@ -430,6 +434,7 @@ func (d *decoder) scalar(n *node, out reflect.Value) bool { // reasons we set it as a string, so that code that unmarshals // timestamp-like values into interface{} will continue to // see a string and not a time.Time. + // TODO(v3) Drop this. out.Set(reflect.ValueOf(n.value)) } else { out.Set(reflect.ValueOf(resolved)) @@ -542,6 +547,10 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { switch out.Kind() { case reflect.Slice: out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } case reflect.Interface: // No type hints. Will have to use a generic sequence. iface = out @@ -560,7 +569,9 @@ func (d *decoder) sequence(n *node, out reflect.Value) (good bool) { j++ } } - out.Set(out.Slice(0, j)) + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } if iface.IsValid() { iface.Set(out) } diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go index cf0db118ac9..a1c2cc52627 100644 --- a/vendor/gopkg.in/yaml.v2/emitterc.go +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -843,7 +843,7 @@ func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event return true } -// Write an achor. +// Write an anchor. 
func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { if emitter.anchor_data.anchor == nil { return true diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go index 1e730eff6a8..a14435e82f8 100644 --- a/vendor/gopkg.in/yaml.v2/encode.go +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -131,7 +131,7 @@ func (e *encoder) marshal(tag string, in reflect.Value) { } else { e.structv(tag, in) } - case reflect.Slice: + case reflect.Slice, reflect.Array: if in.Type().Elem() == mapItemType { e.itemsv(tag, in) } else { @@ -328,14 +328,18 @@ func (e *encoder) uintv(tag string, in reflect.Value) { func (e *encoder) timev(tag string, in reflect.Value) { t := in.Interface().(time.Time) - if tag == "" { - tag = yaml_TIMESTAMP_TAG - } - e.emitScalar(t.Format(time.RFC3339Nano), "", tag, yaml_PLAIN_SCALAR_STYLE) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) } func (e *encoder) floatv(tag string, in reflect.Value) { - s := strconv.FormatFloat(in.Float(), 'g', -1, 64) + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) switch s { case "+Inf": s = ".inf" diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go index f450791717b..7c1f5fac3db 100644 --- a/vendor/gopkg.in/yaml.v2/readerc.go +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -93,9 +93,18 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { panic("read handler must be set") } + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + // If the EOF flag is set and the raw buffer is empty, do nothing. if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - return true + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true } // Return if the buffer contains enough characters. @@ -389,6 +398,15 @@ func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { break } } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. 
+ for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } parser.buffer = parser.buffer[:buffer_len] return true } diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index ea90bd5e0de..6c151db6fbd 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -92,6 +92,19 @@ func resolve(tag string, in string) (rtag string, out interface{}) { switch tag { case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG: return + case yaml_FLOAT_TAG: + if rtag == yaml_INT_TAG { + switch v := out.(type) { + case int64: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + case int: + rtag = yaml_FLOAT_TAG + out = float64(v) + return + } + } } failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) }() @@ -167,12 +180,12 @@ func resolve(tag string, in string) (rtag string, out interface{}) { return yaml_INT_TAG, uintv } } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt(plain[3:], 2, 64) + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) if err == nil { - if intv == int64(int(intv)) { - return yaml_INT_TAG, -int(intv) + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) } else { - return yaml_INT_TAG, -intv + return yaml_INT_TAG, intv } } } @@ -211,10 +224,10 @@ func encodeBase64(s string) string { // This is a subset of the formats allowed by the regular expression // defined at http://yaml.org/type/timestamp.html. var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5Z07:00", - "2006-1-2t15:4:5Z07:00", // RFC3339 with lower-case "t". - "2006-1-2 15:4:5", // space separated with no time zone - "2006-1-2", // date only + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" // from the set of examples. } diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 492a9845dac..077fd1dd2d4 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -871,12 +871,6 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { required := parser.flow_level == 0 && parser.indent == parser.mark.column - // A simple key is required only when it is the first token in the current - // line. Therefore it is always allowed. But we add a check anyway. - if required && !parser.simple_key_allowed { - panic("should not happen") - } - // // If the current position may start a simple key, save it. // @@ -2475,6 +2469,10 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + // Check if we are at the end of the scalar. if single { if parser.buffer[parser.buffer_pos] == '\'' { @@ -2487,10 +2485,6 @@ func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, si } // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { // Consume a space or a tab character. 
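Editorial aside on the gopkg.in/yaml.v2 changes above: the encode.go hunk formats floats with the precision of the underlying value (issue #352), and encode.go/decode.go now accept fixed-size arrays alongside slices. The following is a minimal, illustrative sketch of the expected effect — it is not taken from this PR or from the upstream test suite, and it assumes only the vendored import path gopkg.in/yaml.v2:

```Go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	type cfg struct {
		Ratio float32 `yaml:"ratio"` // formatted with 32-bit precision after this bump
		Ports [2]int  `yaml:"ports"` // fixed-size arrays now marshal/unmarshal like slices
	}

	out, err := yaml.Marshal(cfg{Ratio: 0.1, Ports: [2]int{80, 443}})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// With 64-bit precision the float32 field rendered as 0.10000000149011612;
	// with 32-bit precision it renders as 0.1.

	var back cfg
	if err := yaml.Unmarshal(out, &back); err != nil {
		panic(err)
	}
	fmt.Println(back.Ports) // [80 443]
}
```

Previously the encoder's type switch only matched reflect.Slice, so a [2]int field was not supported; the decode.go hunk adds the matching reflect.Array case and a length check on unmarshal.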
@@ -2647,10 +2641,10 @@ func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) b for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { if is_blank(parser.buffer, parser.buffer_pos) { - // Check for tab character that abuse indentation. + // Check for tab characters that abuse indentation. if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violate indentation") + start_mark, "found a tab character that violates indentation") return false } diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go index 5958822f9c6..4c45e660a8f 100644 --- a/vendor/gopkg.in/yaml.v2/sorter.go +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -51,6 +51,15 @@ func (l keyList) Less(i, j int) bool { } var ai, bi int var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { an = an*10 + int64(ar[ai]-'0') } diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index 483aae58781..de85aa4cdb7 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -157,8 +157,8 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // of the generated document will reflect the structure of the value itself. // Maps and pointers (to struct, string, int, etc) are accepted as the in value. // -// Struct fields are only unmarshalled if they are exported (have an upper case -// first letter), and are unmarshalled using the field name lowercased as the +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the // default key. Custom keys may be defined via the "yaml" name in the field // tag: the content preceding the first comma is used as the key, and the // following comma-separated options are used to tweak the marshalling process. diff --git a/vendor/k8s.io/helm/pkg/chartutil/chartfile.go b/vendor/k8s.io/helm/pkg/chartutil/chartfile.go index 9897d66ff04..c2879cdae57 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/chartfile.go +++ b/vendor/k8s.io/helm/pkg/chartutil/chartfile.go @@ -31,7 +31,7 @@ import ( // ApiVersionV1 is the API version number for version 1. // // This is ApiVersionV1 instead of APIVersionV1 to match the protobuf-generated name. -const ApiVersionV1 = "v1" +const ApiVersionV1 = "v1" // nolint // UnmarshalChartfile takes raw Chart.yaml data and unmarshals it. func UnmarshalChartfile(data []byte) (*chart.Metadata, error) { diff --git a/vendor/k8s.io/helm/pkg/chartutil/create.go b/vendor/k8s.io/helm/pkg/chartutil/create.go index 319a75e2f50..ec84ba7729f 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/create.go +++ b/vendor/k8s.io/helm/pkg/chartutil/create.go @@ -120,7 +120,6 @@ const defaultIgnore = `# Patterns to ignore when building packages. const defaultIngress = `{{- if .Values.ingress.enabled -}} {{- $fullName := include ".fullname" . 
-}} -{{- $servicePort := .Values.service.port -}} {{- $ingressPath := .Values.ingress.path -}} apiVersion: extensions/v1beta1 kind: Ingress diff --git a/vendor/k8s.io/helm/pkg/chartutil/expand.go b/vendor/k8s.io/helm/pkg/chartutil/expand.go index ae28f814752..126e14e8005 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/expand.go +++ b/vendor/k8s.io/helm/pkg/chartutil/expand.go @@ -63,11 +63,12 @@ func Expand(dir string, r io.Reader) error { if err != nil { return err } - defer file.Close() _, err = io.Copy(file, tr) if err != nil { + file.Close() return err } + file.Close() } return nil } diff --git a/vendor/k8s.io/helm/pkg/chartutil/files.go b/vendor/k8s.io/helm/pkg/chartutil/files.go index 687a9a8d61d..a711a336626 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/files.go +++ b/vendor/k8s.io/helm/pkg/chartutil/files.go @@ -220,10 +220,10 @@ func ToJson(v interface{}) string { return string(data) } -// FromJson converts a YAML document into a map[string]interface{}. +// FromJson converts a JSON document into a map[string]interface{}. // // This is not a general-purpose JSON parser, and will not parse all valid -// YAML documents. Additionally, because its intended use is within templates +// JSON documents. Additionally, because its intended use is within templates // it tolerates errors. It will insert the returned error message string into // m["Error"] in the returned map. func FromJson(str string) map[string]interface{} { diff --git a/vendor/k8s.io/helm/pkg/chartutil/save.go b/vendor/k8s.io/helm/pkg/chartutil/save.go index b89917d9633..bff32dde519 100644 --- a/vendor/k8s.io/helm/pkg/chartutil/save.go +++ b/vendor/k8s.io/helm/pkg/chartutil/save.go @@ -70,6 +70,12 @@ func SaveDir(c *chart.Chart, dest string) error { // Save files for _, f := range c.Files { n := filepath.Join(outdir, f.TypeUrl) + + d := filepath.Dir(n) + if err := os.MkdirAll(d, 0755); err != nil { + return err + } + if err := ioutil.WriteFile(n, f.Value, 0755); err != nil { return err } diff --git a/vendor/k8s.io/helm/pkg/getter/httpgetter.go b/vendor/k8s.io/helm/pkg/getter/httpgetter.go index 5a2146ec6f8..3c20e35e199 100644 --- a/vendor/k8s.io/helm/pkg/getter/httpgetter.go +++ b/vendor/k8s.io/helm/pkg/getter/httpgetter.go @@ -27,13 +27,26 @@ import ( "k8s.io/helm/pkg/version" ) -//httpGetter is the efault HTTP(/S) backend handler -type httpGetter struct { - client *http.Client +//HttpGetter is the efault HTTP(/S) backend handler +// TODO: change the name to HTTPGetter in Helm 3 +type HttpGetter struct { //nolint + client *http.Client + username string + password string +} + +//SetCredentials sets the credentials for the getter +func (g *HttpGetter) SetCredentials(username, password string) { + g.username = username + g.password = password } //Get performs a Get from repo.Getter and returns the body. 
-func (g *httpGetter) Get(href string) (*bytes.Buffer, error) { +func (g *HttpGetter) Get(href string) (*bytes.Buffer, error) { + return g.get(href) +} + +func (g *HttpGetter) get(href string) (*bytes.Buffer, error) { buf := bytes.NewBuffer(nil) // Set a helm specific user agent so that a repo server and metrics can @@ -44,6 +57,10 @@ func (g *httpGetter) Get(href string) (*bytes.Buffer, error) { } req.Header.Set("User-Agent", "Helm/"+strings.TrimPrefix(version.GetVersion(), "v")) + if g.username != "" && g.password != "" { + req.SetBasicAuth(g.username, g.password) + } + resp, err := g.client.Do(req) if err != nil { return buf, err @@ -59,23 +76,29 @@ func (g *httpGetter) Get(href string) (*bytes.Buffer, error) { // newHTTPGetter constructs a valid http/https client as Getter func newHTTPGetter(URL, CertFile, KeyFile, CAFile string) (Getter, error) { - var client httpGetter + return NewHTTPGetter(URL, CertFile, KeyFile, CAFile) +} + +// NewHTTPGetter constructs a valid http/https client as HttpGetter +func NewHTTPGetter(URL, CertFile, KeyFile, CAFile string) (*HttpGetter, error) { + var client HttpGetter if CertFile != "" && KeyFile != "" { tlsConf, err := tlsutil.NewClientTLS(CertFile, KeyFile, CAFile) if err != nil { - return nil, fmt.Errorf("can't create TLS config for client: %s", err.Error()) + return &client, fmt.Errorf("can't create TLS config for client: %s", err.Error()) } tlsConf.BuildNameToCertificate() sni, err := urlutil.ExtractHostname(URL) if err != nil { - return nil, err + return &client, err } tlsConf.ServerName = sni client.client = &http.Client{ Transport: &http.Transport{ TLSClientConfig: tlsConf, + Proxy: http.ProxyFromEnvironment, }, } } else { diff --git a/vendor/k8s.io/helm/pkg/helm/client.go b/vendor/k8s.io/helm/pkg/helm/client.go new file mode 100644 index 00000000000..43e9f4dafa2 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/helm/client.go @@ -0,0 +1,522 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helm // import "k8s.io/helm/pkg/helm" + +import ( + "fmt" + "io" + "time" + + "golang.org/x/net/context" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/keepalive" + + healthpb "google.golang.org/grpc/health/grpc_health_v1" + "k8s.io/helm/pkg/chartutil" + "k8s.io/helm/pkg/proto/hapi/chart" + rls "k8s.io/helm/pkg/proto/hapi/services" +) + +// maxMsgSize use 20MB as the default message size limit. +// grpc library default is 4MB +const maxMsgSize = 1024 * 1024 * 20 + +// Client manages client side of the Helm-Tiller protocol. +type Client struct { + opts options +} + +// NewClient creates a new client. +func NewClient(opts ...Option) *Client { + var c Client + // set some sane defaults + c.Option(ConnectTimeout(5)) + return c.Option(opts...) +} + +// Option configures the Helm client with the provided options. +func (h *Client) Option(opts ...Option) *Client { + for _, opt := range opts { + opt(&h.opts) + } + return h +} + +// ListReleases lists the current releases. 
+func (h *Client) ListReleases(opts ...ReleaseListOption) (*rls.ListReleasesResponse, error) { + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + req := &reqOpts.listReq + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + return h.list(ctx, req) +} + +// InstallRelease loads a chart from chstr, installs it, and returns the release response. +func (h *Client) InstallRelease(chstr, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { + // load the chart to install + chart, err := chartutil.Load(chstr) + if err != nil { + return nil, err + } + + return h.InstallReleaseFromChart(chart, ns, opts...) +} + +// InstallReleaseFromChart installs a new chart and returns the release response. +func (h *Client) InstallReleaseFromChart(chart *chart.Chart, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { + // apply the install options + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + req := &reqOpts.instReq + req.Chart = chart + req.Namespace = ns + req.DryRun = reqOpts.dryRun + req.DisableHooks = reqOpts.disableHooks + req.ReuseName = reqOpts.reuseName + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + err := chartutil.ProcessRequirementsEnabled(req.Chart, req.Values) + if err != nil { + return nil, err + } + err = chartutil.ProcessRequirementsImportValues(req.Chart) + if err != nil { + return nil, err + } + + return h.install(ctx, req) +} + +// DeleteRelease uninstalls a named release and returns the response. +func (h *Client) DeleteRelease(rlsName string, opts ...DeleteOption) (*rls.UninstallReleaseResponse, error) { + // apply the uninstall options + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + + if reqOpts.dryRun { + // In the dry run case, just see if the release exists + r, err := h.ReleaseContent(rlsName) + if err != nil { + return &rls.UninstallReleaseResponse{}, err + } + return &rls.UninstallReleaseResponse{Release: r.Release}, nil + } + + req := &reqOpts.uninstallReq + req.Name = rlsName + req.DisableHooks = reqOpts.disableHooks + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + return h.delete(ctx, req) +} + +// UpdateRelease loads a chart from chstr and updates a release to a new/different chart. +func (h *Client) UpdateRelease(rlsName string, chstr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { + // load the chart to update + chart, err := chartutil.Load(chstr) + if err != nil { + return nil, err + } + + return h.UpdateReleaseFromChart(rlsName, chart, opts...) +} + +// UpdateReleaseFromChart updates a release to a new/different chart. 
+func (h *Client) UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { + // apply the update options + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + req := &reqOpts.updateReq + req.Chart = chart + req.DryRun = reqOpts.dryRun + req.Name = rlsName + req.DisableHooks = reqOpts.disableHooks + req.Recreate = reqOpts.recreate + req.Force = reqOpts.force + req.ResetValues = reqOpts.resetValues + req.ReuseValues = reqOpts.reuseValues + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + err := chartutil.ProcessRequirementsEnabled(req.Chart, req.Values) + if err != nil { + return nil, err + } + err = chartutil.ProcessRequirementsImportValues(req.Chart) + if err != nil { + return nil, err + } + + return h.update(ctx, req) +} + +// GetVersion returns the server version. +func (h *Client) GetVersion(opts ...VersionOption) (*rls.GetVersionResponse, error) { + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + req := &rls.GetVersionRequest{} + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + return h.version(ctx, req) +} + +// RollbackRelease rolls back a release to the previous version. +func (h *Client) RollbackRelease(rlsName string, opts ...RollbackOption) (*rls.RollbackReleaseResponse, error) { + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + req := &reqOpts.rollbackReq + req.Recreate = reqOpts.recreate + req.Force = reqOpts.force + req.DisableHooks = reqOpts.disableHooks + req.DryRun = reqOpts.dryRun + req.Name = rlsName + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + return h.rollback(ctx, req) +} + +// ReleaseStatus returns the given release's status. +func (h *Client) ReleaseStatus(rlsName string, opts ...StatusOption) (*rls.GetReleaseStatusResponse, error) { + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + req := &reqOpts.statusReq + req.Name = rlsName + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + return h.status(ctx, req) +} + +// ReleaseContent returns the configuration for a given release. +func (h *Client) ReleaseContent(rlsName string, opts ...ContentOption) (*rls.GetReleaseContentResponse, error) { + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + req := &reqOpts.contentReq + req.Name = rlsName + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + return h.content(ctx, req) +} + +// ReleaseHistory returns a release's revision history. +func (h *Client) ReleaseHistory(rlsName string, opts ...HistoryOption) (*rls.GetHistoryResponse, error) { + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + + req := &reqOpts.histReq + req.Name = rlsName + ctx := NewContext() + + if reqOpts.before != nil { + if err := reqOpts.before(ctx, req); err != nil { + return nil, err + } + } + return h.history(ctx, req) +} + +// RunReleaseTest executes a pre-defined test on a release. 
+func (h *Client) RunReleaseTest(rlsName string, opts ...ReleaseTestOption) (<-chan *rls.TestReleaseResponse, <-chan error) { + reqOpts := h.opts + for _, opt := range opts { + opt(&reqOpts) + } + + req := &reqOpts.testReq + req.Name = rlsName + ctx := NewContext() + + return h.test(ctx, req) +} + +// PingTiller pings the Tiller pod and ensure's that it is up and running +func (h *Client) PingTiller() error { + ctx := NewContext() + return h.ping(ctx) +} + +// connect returns a gRPC connection to Tiller or error. The gRPC dial options +// are constructed here. +func (h *Client) connect(ctx context.Context) (conn *grpc.ClientConn, err error) { + opts := []grpc.DialOption{ + grpc.WithBlock(), + grpc.WithKeepaliveParams(keepalive.ClientParameters{ + // Send keepalive every 30 seconds to prevent the connection from + // getting closed by upstreams + Time: time.Duration(30) * time.Second, + }), + grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxMsgSize)), + } + switch { + case h.opts.useTLS: + opts = append(opts, grpc.WithTransportCredentials(credentials.NewTLS(h.opts.tlsConfig))) + default: + opts = append(opts, grpc.WithInsecure()) + } + ctx, cancel := context.WithTimeout(ctx, h.opts.connectTimeout) + defer cancel() + if conn, err = grpc.DialContext(ctx, h.opts.host, opts...); err != nil { + return nil, err + } + return conn, nil +} + +// Executes tiller.ListReleases RPC. +func (h *Client) list(ctx context.Context, req *rls.ListReleasesRequest) (*rls.ListReleasesResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + s, err := rlc.ListReleases(ctx, req) + if err != nil { + return nil, err + } + var resp *rls.ListReleasesResponse + for { + r, err := s.Recv() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + if resp == nil { + resp = r + continue + } + resp.Releases = append(resp.Releases, r.GetReleases()[0]) + } + return resp, nil +} + +// Executes tiller.InstallRelease RPC. +func (h *Client) install(ctx context.Context, req *rls.InstallReleaseRequest) (*rls.InstallReleaseResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + return rlc.InstallRelease(ctx, req) +} + +// Executes tiller.UninstallRelease RPC. +func (h *Client) delete(ctx context.Context, req *rls.UninstallReleaseRequest) (*rls.UninstallReleaseResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + return rlc.UninstallRelease(ctx, req) +} + +// Executes tiller.UpdateRelease RPC. +func (h *Client) update(ctx context.Context, req *rls.UpdateReleaseRequest) (*rls.UpdateReleaseResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + return rlc.UpdateRelease(ctx, req) +} + +// Executes tiller.RollbackRelease RPC. +func (h *Client) rollback(ctx context.Context, req *rls.RollbackReleaseRequest) (*rls.RollbackReleaseResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + return rlc.RollbackRelease(ctx, req) +} + +// Executes tiller.GetReleaseStatus RPC. 
+func (h *Client) status(ctx context.Context, req *rls.GetReleaseStatusRequest) (*rls.GetReleaseStatusResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + return rlc.GetReleaseStatus(ctx, req) +} + +// Executes tiller.GetReleaseContent RPC. +func (h *Client) content(ctx context.Context, req *rls.GetReleaseContentRequest) (*rls.GetReleaseContentResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + return rlc.GetReleaseContent(ctx, req) +} + +// Executes tiller.GetVersion RPC. +func (h *Client) version(ctx context.Context, req *rls.GetVersionRequest) (*rls.GetVersionResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + return rlc.GetVersion(ctx, req) +} + +// Executes tiller.GetHistory RPC. +func (h *Client) history(ctx context.Context, req *rls.GetHistoryRequest) (*rls.GetHistoryResponse, error) { + c, err := h.connect(ctx) + if err != nil { + return nil, err + } + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + return rlc.GetHistory(ctx, req) +} + +// Executes tiller.TestRelease RPC. +func (h *Client) test(ctx context.Context, req *rls.TestReleaseRequest) (<-chan *rls.TestReleaseResponse, <-chan error) { + errc := make(chan error, 1) + c, err := h.connect(ctx) + if err != nil { + errc <- err + return nil, errc + } + + ch := make(chan *rls.TestReleaseResponse, 1) + go func() { + defer close(errc) + defer close(ch) + defer c.Close() + + rlc := rls.NewReleaseServiceClient(c) + s, err := rlc.RunReleaseTest(ctx, req) + if err != nil { + errc <- err + return + } + + for { + msg, err := s.Recv() + if err == io.EOF { + return + } + if err != nil { + errc <- err + return + } + ch <- msg + } + }() + + return ch, errc +} + +// Executes tiller.Ping RPC. +func (h *Client) ping(ctx context.Context) error { + c, err := h.connect(ctx) + if err != nil { + return err + } + defer c.Close() + + healthClient := healthpb.NewHealthClient(c) + resp, err := healthClient.Check(ctx, &healthpb.HealthCheckRequest{Service: "Tiller"}) + if err != nil { + return err + } + switch resp.GetStatus() { + case healthpb.HealthCheckResponse_SERVING: + return nil + case healthpb.HealthCheckResponse_NOT_SERVING: + return fmt.Errorf("tiller is not serving requests at this time, Please try again later") + default: + return fmt.Errorf("tiller healthcheck returned an unknown status") + } +} diff --git a/vendor/k8s.io/helm/pkg/helm/environment/environment.go b/vendor/k8s.io/helm/pkg/helm/environment/environment.go index 49d424b333c..2980e6dc9c9 100644 --- a/vendor/k8s.io/helm/pkg/helm/environment/environment.go +++ b/vendor/k8s.io/helm/pkg/helm/environment/environment.go @@ -39,6 +39,8 @@ var DefaultHelmHome = filepath.Join(homedir.HomeDir(), ".helm") type EnvSettings struct { // TillerHost is the host and port of Tiller. TillerHost string + // TillerConnectionTimeout is the duration (in seconds) helm will wait to establish a connection to tiller. + TillerConnectionTimeout int64 // TillerNamespace is the namespace in which Tiller runs. TillerNamespace string // Home is the local path to the Helm home directory. 
@@ -56,6 +58,7 @@ func (s *EnvSettings) AddFlags(fs *pflag.FlagSet) { fs.StringVar(&s.KubeContext, "kube-context", "", "name of the kubeconfig context to use") fs.BoolVar(&s.Debug, "debug", false, "enable verbose output") fs.StringVar(&s.TillerNamespace, "tiller-namespace", "kube-system", "namespace of Tiller") + fs.Int64Var(&s.TillerConnectionTimeout, "tiller-connection-timeout", int64(300), "the duration (in seconds) Helm will wait to establish a connection to tiller") } // Init sets values from the environment. diff --git a/vendor/k8s.io/helm/pkg/helm/fake.go b/vendor/k8s.io/helm/pkg/helm/fake.go new file mode 100644 index 00000000000..0a9e77c4406 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/helm/fake.go @@ -0,0 +1,277 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helm // import "k8s.io/helm/pkg/helm" + +import ( + "errors" + "fmt" + "math/rand" + "sync" + + "github.com/golang/protobuf/ptypes/timestamp" + "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/proto/hapi/release" + rls "k8s.io/helm/pkg/proto/hapi/services" + "k8s.io/helm/pkg/proto/hapi/version" +) + +// FakeClient implements Interface +type FakeClient struct { + Rels []*release.Release + Responses map[string]release.TestRun_Status + Opts options +} + +// Option returns the fake release client +func (c *FakeClient) Option(opts ...Option) Interface { + for _, opt := range opts { + opt(&c.Opts) + } + return c +} + +var _ Interface = &FakeClient{} +var _ Interface = (*FakeClient)(nil) + +// ListReleases lists the current releases +func (c *FakeClient) ListReleases(opts ...ReleaseListOption) (*rls.ListReleasesResponse, error) { + resp := &rls.ListReleasesResponse{ + Count: int64(len(c.Rels)), + Releases: c.Rels, + } + return resp, nil +} + +// InstallRelease creates a new release and returns a InstallReleaseResponse containing that release +func (c *FakeClient) InstallRelease(chStr, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { + chart := &chart.Chart{} + return c.InstallReleaseFromChart(chart, ns, opts...) +} + +// InstallReleaseFromChart adds a new MockRelease to the fake client and returns a InstallReleaseResponse containing that release +func (c *FakeClient) InstallReleaseFromChart(chart *chart.Chart, ns string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) { + for _, opt := range opts { + opt(&c.Opts) + } + + releaseName := c.Opts.instReq.Name + + // Check to see if the release already exists. 
+ rel, err := c.ReleaseStatus(releaseName, nil) + if err == nil && rel != nil { + return nil, errors.New("cannot re-use a name that is still in use") + } + + release := ReleaseMock(&MockReleaseOptions{Name: releaseName, Namespace: ns}) + c.Rels = append(c.Rels, release) + + return &rls.InstallReleaseResponse{ + Release: release, + }, nil +} + +// DeleteRelease deletes a release from the FakeClient +func (c *FakeClient) DeleteRelease(rlsName string, opts ...DeleteOption) (*rls.UninstallReleaseResponse, error) { + for i, rel := range c.Rels { + if rel.Name == rlsName { + c.Rels = append(c.Rels[:i], c.Rels[i+1:]...) + return &rls.UninstallReleaseResponse{ + Release: rel, + }, nil + } + } + + return nil, fmt.Errorf("No such release: %s", rlsName) +} + +// GetVersion returns a fake version +func (c *FakeClient) GetVersion(opts ...VersionOption) (*rls.GetVersionResponse, error) { + return &rls.GetVersionResponse{ + Version: &version.Version{ + SemVer: "1.2.3-fakeclient+testonly", + }, + }, nil +} + +// UpdateRelease returns an UpdateReleaseResponse containing the updated release, if it exists +func (c *FakeClient) UpdateRelease(rlsName string, chStr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { + return c.UpdateReleaseFromChart(rlsName, &chart.Chart{}, opts...) +} + +// UpdateReleaseFromChart returns an UpdateReleaseResponse containing the updated release, if it exists +func (c *FakeClient) UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) { + // Check to see if the release already exists. + rel, err := c.ReleaseContent(rlsName, nil) + if err != nil { + return nil, err + } + + return &rls.UpdateReleaseResponse{Release: rel.Release}, nil +} + +// RollbackRelease returns nil, nil +func (c *FakeClient) RollbackRelease(rlsName string, opts ...RollbackOption) (*rls.RollbackReleaseResponse, error) { + return nil, nil +} + +// ReleaseStatus returns a release status response with info from the matching release name. +func (c *FakeClient) ReleaseStatus(rlsName string, opts ...StatusOption) (*rls.GetReleaseStatusResponse, error) { + for _, rel := range c.Rels { + if rel.Name == rlsName { + return &rls.GetReleaseStatusResponse{ + Name: rel.Name, + Info: rel.Info, + Namespace: rel.Namespace, + }, nil + } + } + return nil, fmt.Errorf("No such release: %s", rlsName) +} + +// ReleaseContent returns the configuration for the matching release name in the fake release client. +func (c *FakeClient) ReleaseContent(rlsName string, opts ...ContentOption) (resp *rls.GetReleaseContentResponse, err error) { + for _, rel := range c.Rels { + if rel.Name == rlsName { + return &rls.GetReleaseContentResponse{ + Release: rel, + }, nil + } + } + return resp, fmt.Errorf("No such release: %s", rlsName) +} + +// ReleaseHistory returns a release's revision history. 
+func (c *FakeClient) ReleaseHistory(rlsName string, opts ...HistoryOption) (*rls.GetHistoryResponse, error) { + return &rls.GetHistoryResponse{Releases: c.Rels}, nil +} + +// RunReleaseTest executes a pre-defined tests on a release +func (c *FakeClient) RunReleaseTest(rlsName string, opts ...ReleaseTestOption) (<-chan *rls.TestReleaseResponse, <-chan error) { + + results := make(chan *rls.TestReleaseResponse) + errc := make(chan error, 1) + + go func() { + var wg sync.WaitGroup + for m, s := range c.Responses { + wg.Add(1) + + go func(msg string, status release.TestRun_Status) { + defer wg.Done() + results <- &rls.TestReleaseResponse{Msg: msg, Status: status} + }(m, s) + } + + wg.Wait() + close(results) + close(errc) + }() + + return results, errc +} + +// PingTiller pings the Tiller pod and ensure's that it is up and running +func (c *FakeClient) PingTiller() error { + return nil +} + +// MockHookTemplate is the hook template used for all mock release objects. +var MockHookTemplate = `apiVersion: v1 +kind: Job +metadata: + annotations: + "helm.sh/hook": pre-install +` + +// MockManifest is the manifest used for all mock release objects. +var MockManifest = `apiVersion: v1 +kind: Secret +metadata: + name: fixture +` + +// MockReleaseOptions allows for user-configurable options on mock release objects. +type MockReleaseOptions struct { + Name string + Version int32 + Chart *chart.Chart + StatusCode release.Status_Code + Namespace string +} + +// ReleaseMock creates a mock release object based on options set by MockReleaseOptions. This function should typically not be used outside of testing. +func ReleaseMock(opts *MockReleaseOptions) *release.Release { + date := timestamp.Timestamp{Seconds: 242085845, Nanos: 0} + + name := opts.Name + if name == "" { + name = "testrelease-" + string(rand.Intn(100)) + } + + var version int32 = 1 + if opts.Version != 0 { + version = opts.Version + } + + namespace := opts.Namespace + if namespace == "" { + namespace = "default" + } + + ch := opts.Chart + if opts.Chart == nil { + ch = &chart.Chart{ + Metadata: &chart.Metadata{ + Name: "foo", + Version: "0.1.0-beta.1", + }, + Templates: []*chart.Template{ + {Name: "templates/foo.tpl", Data: []byte(MockManifest)}, + }, + } + } + + scode := release.Status_DEPLOYED + if opts.StatusCode > 0 { + scode = opts.StatusCode + } + + return &release.Release{ + Name: name, + Info: &release.Info{ + FirstDeployed: &date, + LastDeployed: &date, + Status: &release.Status{Code: scode}, + Description: "Release mock", + }, + Chart: ch, + Config: &chart.Config{Raw: `name: "value"`}, + Version: version, + Namespace: namespace, + Hooks: []*release.Hook{ + { + Name: "pre-install-hook", + Kind: "Job", + Path: "pre-install-hook.yaml", + Manifest: MockHookTemplate, + LastRun: &date, + Events: []release.Hook_Event{release.Hook_PRE_INSTALL}, + }, + }, + Manifest: MockManifest, + } +} diff --git a/vendor/k8s.io/helm/pkg/helm/interface.go b/vendor/k8s.io/helm/pkg/helm/interface.go new file mode 100644 index 00000000000..10c04c71089 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/helm/interface.go @@ -0,0 +1,39 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helm + +import ( + "k8s.io/helm/pkg/proto/hapi/chart" + rls "k8s.io/helm/pkg/proto/hapi/services" +) + +// Interface for helm client for mocking in tests +type Interface interface { + ListReleases(opts ...ReleaseListOption) (*rls.ListReleasesResponse, error) + InstallRelease(chStr, namespace string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) + InstallReleaseFromChart(chart *chart.Chart, namespace string, opts ...InstallOption) (*rls.InstallReleaseResponse, error) + DeleteRelease(rlsName string, opts ...DeleteOption) (*rls.UninstallReleaseResponse, error) + ReleaseStatus(rlsName string, opts ...StatusOption) (*rls.GetReleaseStatusResponse, error) + UpdateRelease(rlsName, chStr string, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) + UpdateReleaseFromChart(rlsName string, chart *chart.Chart, opts ...UpdateOption) (*rls.UpdateReleaseResponse, error) + RollbackRelease(rlsName string, opts ...RollbackOption) (*rls.RollbackReleaseResponse, error) + ReleaseContent(rlsName string, opts ...ContentOption) (*rls.GetReleaseContentResponse, error) + ReleaseHistory(rlsName string, opts ...HistoryOption) (*rls.GetHistoryResponse, error) + GetVersion(opts ...VersionOption) (*rls.GetVersionResponse, error) + RunReleaseTest(rlsName string, opts ...ReleaseTestOption) (<-chan *rls.TestReleaseResponse, <-chan error) + PingTiller() error +} diff --git a/vendor/k8s.io/helm/pkg/helm/option.go b/vendor/k8s.io/helm/pkg/helm/option.go new file mode 100644 index 00000000000..3381e3f80fc --- /dev/null +++ b/vendor/k8s.io/helm/pkg/helm/option.go @@ -0,0 +1,444 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package helm + +import ( + "crypto/tls" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" + + cpb "k8s.io/helm/pkg/proto/hapi/chart" + "k8s.io/helm/pkg/proto/hapi/release" + rls "k8s.io/helm/pkg/proto/hapi/services" + "k8s.io/helm/pkg/version" +) + +// Option allows specifying various settings configurable by +// the helm client user for overriding the defaults used when +// issuing rpc's to the Tiller release server. +type Option func(*options) + +// options specify optional settings used by the helm client. 
+type options struct { + // value of helm home override + host string + // if set dry-run helm client calls + dryRun bool + // if set enable TLS on helm client calls + useTLS bool + // if set, re-use an existing name + reuseName bool + // if set, performs pod restart during upgrade/rollback + recreate bool + // if set, force resource update through delete/recreate if needed + force bool + // if set, skip running hooks + disableHooks bool + // name of release + releaseName string + // tls.Config to use for rpc if tls enabled + tlsConfig *tls.Config + // release list options are applied directly to the list releases request + listReq rls.ListReleasesRequest + // release install options are applied directly to the install release request + instReq rls.InstallReleaseRequest + // release update options are applied directly to the update release request + updateReq rls.UpdateReleaseRequest + // release uninstall options are applied directly to the uninstall release request + uninstallReq rls.UninstallReleaseRequest + // release get status options are applied directly to the get release status request + statusReq rls.GetReleaseStatusRequest + // release get content options are applied directly to the get release content request + contentReq rls.GetReleaseContentRequest + // release rollback options are applied directly to the rollback release request + rollbackReq rls.RollbackReleaseRequest + // before intercepts client calls before sending + before func(context.Context, proto.Message) error + // release history options are applied directly to the get release history request + histReq rls.GetHistoryRequest + // resetValues instructs Tiller to reset values to their defaults. + resetValues bool + // reuseValues instructs Tiller to reuse the values from the last release. + reuseValues bool + // release test options are applied directly to the test release history request + testReq rls.TestReleaseRequest + // connectTimeout specifies the time duration Helm will wait to establish a connection to tiller + connectTimeout time.Duration +} + +// Host specifies the host address of the Tiller release server, (default = ":44134"). +func Host(host string) Option { + return func(opts *options) { + opts.host = host + } +} + +// WithTLS specifies the tls configuration if the helm client is enabled to use TLS. +func WithTLS(cfg *tls.Config) Option { + return func(opts *options) { + opts.useTLS = true + opts.tlsConfig = cfg + } +} + +// BeforeCall returns an option that allows intercepting a helm client rpc +// before being sent OTA to tiller. The intercepting function should return +// an error to indicate that the call should not proceed or nil otherwise. +func BeforeCall(fn func(context.Context, proto.Message) error) Option { + return func(opts *options) { + opts.before = fn + } +} + +// ReleaseListOption allows specifying various settings +// configurable by the helm client user for overriding +// the defaults used when running the `helm list` command. +type ReleaseListOption func(*options) + +// ReleaseListOffset specifies the offset into a list of releases. +func ReleaseListOffset(offset string) ReleaseListOption { + return func(opts *options) { + opts.listReq.Offset = offset + } +} + +// ReleaseListFilter specifies a filter to apply a list of releases. +func ReleaseListFilter(filter string) ReleaseListOption { + return func(opts *options) { + opts.listReq.Filter = filter + } +} + +// ReleaseListLimit set an upper bound on the number of releases returned. 
+func ReleaseListLimit(limit int) ReleaseListOption { + return func(opts *options) { + opts.listReq.Limit = int64(limit) + } +} + +// ReleaseListOrder specifies how to order a list of releases. +func ReleaseListOrder(order int32) ReleaseListOption { + return func(opts *options) { + opts.listReq.SortOrder = rls.ListSort_SortOrder(order) + } +} + +// ReleaseListSort specifies how to sort a release list. +func ReleaseListSort(sort int32) ReleaseListOption { + return func(opts *options) { + opts.listReq.SortBy = rls.ListSort_SortBy(sort) + } +} + +// ReleaseListStatuses specifies which status codes should be returned. +func ReleaseListStatuses(statuses []release.Status_Code) ReleaseListOption { + return func(opts *options) { + if len(statuses) == 0 { + statuses = []release.Status_Code{release.Status_DEPLOYED} + } + opts.listReq.StatusCodes = statuses + } +} + +// ReleaseListNamespace specifies the namespace to list releases from +func ReleaseListNamespace(namespace string) ReleaseListOption { + return func(opts *options) { + opts.listReq.Namespace = namespace + } +} + +// InstallOption allows specifying various settings +// configurable by the helm client user for overriding +// the defaults used when running the `helm install` command. +type InstallOption func(*options) + +// ValueOverrides specifies a list of values to include when installing. +func ValueOverrides(raw []byte) InstallOption { + return func(opts *options) { + opts.instReq.Values = &cpb.Config{Raw: string(raw)} + } +} + +// ReleaseName specifies the name of the release when installing. +func ReleaseName(name string) InstallOption { + return func(opts *options) { + opts.instReq.Name = name + } +} + +// ConnectTimeout specifies the duration (in seconds) Helm will wait to establish a connection to tiller +func ConnectTimeout(timeout int64) Option { + return func(opts *options) { + opts.connectTimeout = time.Duration(timeout) * time.Second + } +} + +// InstallTimeout specifies the number of seconds before kubernetes calls timeout +func InstallTimeout(timeout int64) InstallOption { + return func(opts *options) { + opts.instReq.Timeout = timeout + } +} + +// UpgradeTimeout specifies the number of seconds before kubernetes calls timeout +func UpgradeTimeout(timeout int64) UpdateOption { + return func(opts *options) { + opts.updateReq.Timeout = timeout + } +} + +// DeleteTimeout specifies the number of seconds before kubernetes calls timeout +func DeleteTimeout(timeout int64) DeleteOption { + return func(opts *options) { + opts.uninstallReq.Timeout = timeout + } +} + +// ReleaseTestTimeout specifies the number of seconds before kubernetes calls timeout +func ReleaseTestTimeout(timeout int64) ReleaseTestOption { + return func(opts *options) { + opts.testReq.Timeout = timeout + } +} + +// ReleaseTestCleanup is a boolean value representing whether to cleanup test pods +func ReleaseTestCleanup(cleanup bool) ReleaseTestOption { + return func(opts *options) { + opts.testReq.Cleanup = cleanup + } +} + +// RollbackTimeout specifies the number of seconds before kubernetes calls timeout +func RollbackTimeout(timeout int64) RollbackOption { + return func(opts *options) { + opts.rollbackReq.Timeout = timeout + } +} + +// InstallWait specifies whether or not to wait for all resources to be ready +func InstallWait(wait bool) InstallOption { + return func(opts *options) { + opts.instReq.Wait = wait + } +} + +// UpgradeWait specifies whether or not to wait for all resources to be ready +func UpgradeWait(wait bool) UpdateOption { + return func(opts 
*options) { + opts.updateReq.Wait = wait + } +} + +// RollbackWait specifies whether or not to wait for all resources to be ready +func RollbackWait(wait bool) RollbackOption { + return func(opts *options) { + opts.rollbackReq.Wait = wait + } +} + +// UpdateValueOverrides specifies a list of values to include when upgrading +func UpdateValueOverrides(raw []byte) UpdateOption { + return func(opts *options) { + opts.updateReq.Values = &cpb.Config{Raw: string(raw)} + } +} + +// DeleteDisableHooks will disable hooks for a deletion operation. +func DeleteDisableHooks(disable bool) DeleteOption { + return func(opts *options) { + opts.disableHooks = disable + } +} + +// DeleteDryRun will (if true) execute a deletion as a dry run. +func DeleteDryRun(dry bool) DeleteOption { + return func(opts *options) { + opts.dryRun = dry + } +} + +// DeletePurge removes the release from the store and make its name free for later use. +func DeletePurge(purge bool) DeleteOption { + return func(opts *options) { + opts.uninstallReq.Purge = purge + } +} + +// InstallDryRun will (if true) execute an installation as a dry run. +func InstallDryRun(dry bool) InstallOption { + return func(opts *options) { + opts.dryRun = dry + } +} + +// InstallDisableHooks disables hooks during installation. +func InstallDisableHooks(disable bool) InstallOption { + return func(opts *options) { + opts.disableHooks = disable + } +} + +// InstallReuseName will (if true) instruct Tiller to re-use an existing name. +func InstallReuseName(reuse bool) InstallOption { + return func(opts *options) { + opts.reuseName = reuse + } +} + +// RollbackDisableHooks will disable hooks for a rollback operation +func RollbackDisableHooks(disable bool) RollbackOption { + return func(opts *options) { + opts.disableHooks = disable + } +} + +// RollbackDryRun will (if true) execute a rollback as a dry run. +func RollbackDryRun(dry bool) RollbackOption { + return func(opts *options) { + opts.dryRun = dry + } +} + +// RollbackRecreate will (if true) recreate pods after rollback. +func RollbackRecreate(recreate bool) RollbackOption { + return func(opts *options) { + opts.recreate = recreate + } +} + +// RollbackForce will (if true) force resource update through delete/recreate if needed +func RollbackForce(force bool) RollbackOption { + return func(opts *options) { + opts.force = force + } +} + +// RollbackVersion sets the version of the release to deploy. +func RollbackVersion(ver int32) RollbackOption { + return func(opts *options) { + opts.rollbackReq.Version = ver + } +} + +// UpgradeDisableHooks will disable hooks for an upgrade operation. +func UpgradeDisableHooks(disable bool) UpdateOption { + return func(opts *options) { + opts.disableHooks = disable + } +} + +// UpgradeDryRun will (if true) execute an upgrade as a dry run. +func UpgradeDryRun(dry bool) UpdateOption { + return func(opts *options) { + opts.dryRun = dry + } +} + +// ResetValues will (if true) trigger resetting the values to their original state. +func ResetValues(reset bool) UpdateOption { + return func(opts *options) { + opts.resetValues = reset + } +} + +// ReuseValues will cause Tiller to reuse the values from the last release. +// This is ignored if ResetValues is true. +func ReuseValues(reuse bool) UpdateOption { + return func(opts *options) { + opts.reuseValues = reuse + } +} + +// UpgradeRecreate will (if true) recreate pods after upgrade. 
+func UpgradeRecreate(recreate bool) UpdateOption {
+	return func(opts *options) {
+		opts.recreate = recreate
+	}
+}
+
+// UpgradeForce will (if true) force resource update through delete/recreate if needed.
+func UpgradeForce(force bool) UpdateOption {
+	return func(opts *options) {
+		opts.force = force
+	}
+}
+
+// ContentOption allows setting optional attributes when
+// performing a GetReleaseContent tiller rpc.
+type ContentOption func(*options)
+
+// ContentReleaseVersion will instruct Tiller to retrieve the content
+// of a particular version of a release.
+func ContentReleaseVersion(version int32) ContentOption {
+	return func(opts *options) {
+		opts.contentReq.Version = version
+	}
+}
+
+// StatusOption allows setting optional attributes when
+// performing a GetReleaseStatus tiller rpc.
+type StatusOption func(*options)
+
+// StatusReleaseVersion will instruct Tiller to retrieve the status
+// of a particular version of a release.
+func StatusReleaseVersion(version int32) StatusOption {
+	return func(opts *options) {
+		opts.statusReq.Version = version
+	}
+}
+
+// DeleteOption allows setting optional attributes when
+// performing an UninstallRelease tiller rpc.
+type DeleteOption func(*options)
+
+// VersionOption -- TODO
+type VersionOption func(*options)
+
+// UpdateOption allows specifying various settings
+// configurable by the helm client user for overriding
+// the defaults used when running the `helm upgrade` command.
+type UpdateOption func(*options)
+
+// RollbackOption allows specifying various settings configurable
+// by the helm client user for overriding the defaults used when
+// running the `helm rollback` command.
+type RollbackOption func(*options)
+
+// HistoryOption allows configuring optional request data for
+// issuing a GetHistory rpc.
+type HistoryOption func(*options)
+
+// WithMaxHistory sets the max number of releases to return
+// in a release history query.
+func WithMaxHistory(max int32) HistoryOption {
+	return func(opts *options) {
+		opts.histReq.Max = max
+	}
+}
+
+// NewContext creates a versioned context.
+func NewContext() context.Context {
+	md := metadata.Pairs("x-helm-api-client", version.GetVersion())
+	return metadata.NewOutgoingContext(context.TODO(), md)
+}
+
+// ReleaseTestOption allows configuring optional request data for
+// issuing a TestRelease rpc.
+type ReleaseTestOption func(*options)
diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go
index 49a4aa0ac4e..9daeaa9e565 100644
--- a/vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go
+++ b/vendor/k8s.io/helm/pkg/proto/hapi/chart/metadata.pb.go
@@ -107,7 +107,7 @@ type Metadata struct {
 	// Annotations are additional mappings uninterpreted by Tiller,
 	// made available for inspection by other applications.
 	Annotations map[string]string `protobuf:"bytes,16,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
-	// KubeVersion is a SemVer constraints on what version of Kubernetes is required.
+	// KubeVersion is a SemVer constraint specifying the version of Kubernetes required.
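For orientation, a minimal usage sketch of how the functional options above compose; it assumes the NewClient/Host constructor and the ListReleases, InstallRelease, and DeleteRelease signatures exposed by this version of pkg/helm, and the Tiller address and chart path are placeholders:

// Illustrative sketch only; NewClient, Host, and the method signatures used
// here are taken from this version of pkg/helm and are assumptions as far as
// this diff is concerned.
package main

import (
	"fmt"

	"k8s.io/helm/pkg/helm"
	"k8s.io/helm/pkg/proto/hapi/release"
)

func main() {
	// Plain Option values configure the client connection itself.
	client := helm.NewClient(
		helm.Host("127.0.0.1:44134"),
		helm.ConnectTimeout(30), // seconds
	)

	// ReleaseListOption values shape a single ListReleases RPC.
	resp, err := client.ListReleases(
		helm.ReleaseListLimit(10),
		helm.ReleaseListNamespace("default"),
		helm.ReleaseListStatuses([]release.Status_Code{release.Status_DEPLOYED}),
	)
	if err != nil {
		panic(err)
	}
	for _, rls := range resp.GetReleases() {
		fmt.Println(rls.GetName())
	}

	// InstallOption and DeleteOption values work the same way for other RPCs.
	_, _ = client.InstallRelease("./mychart", "default",
		helm.ReleaseName("demo"),
		helm.InstallWait(true),
		helm.InstallTimeout(300),
	)
	_, _ = client.DeleteRelease("demo", helm.DeletePurge(true))
}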
KubeVersion string `protobuf:"bytes,17,opt,name=kubeVersion" json:"kubeVersion,omitempty"` } @@ -244,32 +244,33 @@ func init() { func init() { proto.RegisterFile("hapi/chart/metadata.proto", fileDescriptor2) } var fileDescriptor2 = []byte{ - // 427 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x5d, 0x6b, 0xdb, 0x30, - 0x14, 0x9d, 0x9b, 0x38, 0x89, 0xaf, 0xd7, 0xcd, 0xbb, 0x8c, 0xa2, 0x95, 0x31, 0x4c, 0xd8, 0x20, - 0x4f, 0x29, 0x6c, 0x30, 0xca, 0x1e, 0x06, 0x1b, 0x94, 0x3e, 0x6c, 0x4d, 0x87, 0xd9, 0x07, 0xec, - 0x4d, 0xb5, 0x2f, 0x8d, 0x48, 0x2c, 0x19, 0x49, 0xe9, 0xc8, 0xaf, 0xd8, 0x5f, 0x1e, 0x92, 0xad, - 0xda, 0x19, 0x7d, 0xbb, 0xe7, 0x1c, 0xdd, 0x23, 0x1d, 0xdd, 0x0b, 0x2f, 0xd6, 0xbc, 0x11, 0x67, - 0xe5, 0x9a, 0x6b, 0x7b, 0x56, 0x93, 0xe5, 0x15, 0xb7, 0x7c, 0xd9, 0x68, 0x65, 0x15, 0x82, 0x93, - 0x96, 0x5e, 0x9a, 0xbf, 0x07, 0xb8, 0xe2, 0x42, 0x5a, 0x2e, 0x24, 0x69, 0x44, 0x18, 0x4b, 0x5e, - 0x13, 0x8b, 0xf2, 0x68, 0x91, 0x14, 0xbe, 0xc6, 0xe7, 0x10, 0x53, 0xcd, 0xc5, 0x96, 0x1d, 0x79, - 0xb2, 0x05, 0xf3, 0xbf, 0x31, 0xcc, 0xae, 0x3a, 0xdb, 0x07, 0xdb, 0x10, 0xc6, 0x6b, 0x55, 0x53, - 0xd7, 0xe5, 0x6b, 0x64, 0x30, 0x35, 0x6a, 0xa7, 0x4b, 0x32, 0x6c, 0x94, 0x8f, 0x16, 0x49, 0x11, - 0xa0, 0x53, 0xee, 0x48, 0x1b, 0xa1, 0x24, 0x1b, 0xfb, 0x86, 0x00, 0x31, 0x87, 0xb4, 0x22, 0x53, - 0x6a, 0xd1, 0x58, 0xa7, 0xc6, 0x5e, 0x1d, 0x52, 0x78, 0x0a, 0xb3, 0x0d, 0xed, 0xff, 0x28, 0x5d, - 0x19, 0x36, 0xf1, 0xb6, 0xf7, 0x18, 0xcf, 0x21, 0xad, 0xef, 0xe3, 0x19, 0x36, 0xcd, 0x47, 0x8b, - 0xf4, 0xed, 0xc9, 0xb2, 0xff, 0x80, 0x65, 0x9f, 0xbe, 0x18, 0x1e, 0xc5, 0x13, 0x98, 0x90, 0xbc, - 0x15, 0x92, 0xd8, 0xcc, 0x5f, 0xd9, 0x21, 0x97, 0x4b, 0x94, 0x4a, 0xb2, 0xa4, 0xcd, 0xe5, 0x6a, - 0x7c, 0x05, 0xc0, 0x1b, 0xf1, 0xb3, 0x0b, 0x00, 0x5e, 0x19, 0x30, 0xf8, 0x12, 0x92, 0x52, 0xc9, - 0x4a, 0xf8, 0x04, 0xa9, 0x97, 0x7b, 0xc2, 0x39, 0x5a, 0x7e, 0x6b, 0xd8, 0xe3, 0xd6, 0xd1, 0xd5, - 0xad, 0x63, 0x13, 0x1c, 0x8f, 0x83, 0x63, 0x60, 0x9c, 0x5e, 0x51, 0xa3, 0xa9, 0xe4, 0x96, 0x2a, - 0xf6, 0x24, 0x8f, 0x16, 0xb3, 0x62, 0xc0, 0xe0, 0x6b, 0x38, 0xb6, 0x62, 0xbb, 0x25, 0x1d, 0x2c, - 0x9e, 0x7a, 0x8b, 0x43, 0x12, 0x2f, 0x21, 0xe5, 0x52, 0x2a, 0xcb, 0xdd, 0x3b, 0x0c, 0xcb, 0xfc, - 0xef, 0xbc, 0x39, 0xf8, 0x9d, 0xb0, 0x39, 0x9f, 0xfa, 0x73, 0x17, 0xd2, 0xea, 0x7d, 0x31, 0xec, - 0x74, 0x43, 0xda, 0xec, 0x6e, 0x28, 0x5c, 0xf6, 0xac, 0x1d, 0xd2, 0x80, 0x3a, 0xfd, 0x08, 0xd9, - 0xff, 0x16, 0x98, 0xc1, 0x68, 0x43, 0xfb, 0x6e, 0x6b, 0x5c, 0xe9, 0x76, 0xed, 0x8e, 0x6f, 0x77, - 0x61, 0x6b, 0x5a, 0xf0, 0xe1, 0xe8, 0x3c, 0x9a, 0xe7, 0x30, 0xb9, 0x68, 0x07, 0x90, 0xc2, 0xf4, - 0xc7, 0xea, 0xcb, 0xea, 0xfa, 0xd7, 0x2a, 0x7b, 0x84, 0x09, 0xc4, 0x97, 0xd7, 0xdf, 0xbf, 0x7d, - 0xcd, 0xa2, 0xcf, 0xd3, 0xdf, 0xb1, 0x7f, 0xf3, 0xcd, 0xc4, 0x6f, 0xf9, 0xbb, 0x7f, 0x01, 0x00, - 0x00, 0xff, 0xff, 0x7f, 0xc1, 0xec, 0x3d, 0x02, 0x03, 0x00, 0x00, + // 435 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x5d, 0x6b, 0xd4, 0x40, + 0x14, 0x35, 0xcd, 0x66, 0x77, 0x73, 0x63, 0x35, 0x0e, 0x52, 0xc6, 0x22, 0x12, 0x16, 0x85, 0x7d, + 0xda, 0x82, 0xbe, 0x14, 0x1f, 0x04, 0x85, 0x52, 0x41, 0xbb, 0x95, 0xe0, 0x07, 0xf8, 0x36, 0x4d, + 0x2e, 0xdd, 0x61, 0x93, 0x99, 0x30, 0x99, 0xad, 0xec, 0xaf, 0xf0, 0x2f, 0xcb, 0xdc, 0x64, 0x9a, + 0xac, 0xf4, 0xed, 0x9e, 0x73, 0x66, 0xce, 0xcc, 0xbd, 0xf7, 0xc0, 0x8b, 0x8d, 0x68, 0xe4, 0x59, + 0xb1, 0x11, 0xc6, 0x9e, 0xd5, 0x68, 0x45, 0x29, 0xac, 0x58, 0x35, 0x46, 0x5b, 0xcd, 0xc0, 0x49, + 0x2b, 0x92, 
0x16, 0x9f, 0x01, 0xae, 0x84, 0x54, 0x56, 0x48, 0x85, 0x86, 0x31, 0x98, 0x28, 0x51, + 0x23, 0x0f, 0xb2, 0x60, 0x19, 0xe7, 0x54, 0xb3, 0xe7, 0x10, 0x61, 0x2d, 0x64, 0xc5, 0x8f, 0x88, + 0xec, 0x00, 0x4b, 0x21, 0xdc, 0x99, 0x8a, 0x87, 0xc4, 0xb9, 0x72, 0xf1, 0x37, 0x82, 0xf9, 0x55, + 0xff, 0xd0, 0x83, 0x46, 0x0c, 0x26, 0x1b, 0x5d, 0x63, 0xef, 0x43, 0x35, 0xe3, 0x30, 0x6b, 0xf5, + 0xce, 0x14, 0xd8, 0xf2, 0x30, 0x0b, 0x97, 0x71, 0xee, 0xa1, 0x53, 0xee, 0xd0, 0xb4, 0x52, 0x2b, + 0x3e, 0xa1, 0x0b, 0x1e, 0xb2, 0x0c, 0x92, 0x12, 0xdb, 0xc2, 0xc8, 0xc6, 0x3a, 0x35, 0x22, 0x75, + 0x4c, 0xb1, 0x53, 0x98, 0x6f, 0x71, 0xff, 0x47, 0x9b, 0xb2, 0xe5, 0x53, 0xb2, 0xbd, 0xc7, 0xec, + 0x1c, 0x92, 0xfa, 0xbe, 0xe1, 0x96, 0xcf, 0xb2, 0x70, 0x99, 0xbc, 0x3d, 0x59, 0x0d, 0x23, 0x59, + 0x0d, 0xf3, 0xc8, 0xc7, 0x47, 0xd9, 0x09, 0x4c, 0x51, 0xdd, 0x4a, 0x85, 0x7c, 0x4e, 0x4f, 0xf6, + 0xc8, 0xf5, 0x25, 0x0b, 0xad, 0x78, 0xdc, 0xf5, 0xe5, 0x6a, 0xf6, 0x0a, 0x40, 0x34, 0xf2, 0x67, + 0xdf, 0x00, 0x90, 0x32, 0x62, 0xd8, 0x4b, 0x88, 0x0b, 0xad, 0x4a, 0x49, 0x1d, 0x24, 0x24, 0x0f, + 0x84, 0x73, 0xb4, 0xe2, 0xb6, 0xe5, 0x8f, 0x3b, 0x47, 0x57, 0x77, 0x8e, 0x8d, 0x77, 0x3c, 0xf6, + 0x8e, 0x9e, 0x71, 0x7a, 0x89, 0x8d, 0xc1, 0x42, 0x58, 0x2c, 0xf9, 0x93, 0x2c, 0x58, 0xce, 0xf3, + 0x11, 0xc3, 0x5e, 0xc3, 0xb1, 0x95, 0x55, 0x85, 0xc6, 0x5b, 0x3c, 0x25, 0x8b, 0x43, 0x92, 0x5d, + 0x42, 0x22, 0x94, 0xd2, 0x56, 0xb8, 0x7f, 0xb4, 0x3c, 0xa5, 0xe9, 0xbc, 0x39, 0x98, 0x8e, 0xcf, + 0xd2, 0xc7, 0xe1, 0xdc, 0x85, 0xb2, 0x66, 0x9f, 0x8f, 0x6f, 0xba, 0x25, 0x6d, 0x77, 0x37, 0xe8, + 0x1f, 0x7b, 0xd6, 0x2d, 0x69, 0x44, 0x9d, 0x7e, 0x80, 0xf4, 0x7f, 0x0b, 0x97, 0xaa, 0x2d, 0xee, + 0xfb, 0xd4, 0xb8, 0xd2, 0xa5, 0xef, 0x4e, 0x54, 0x3b, 0x9f, 0x9a, 0x0e, 0xbc, 0x3f, 0x3a, 0x0f, + 0x16, 0x19, 0x4c, 0x2f, 0xba, 0x05, 0x24, 0x30, 0xfb, 0xb1, 0xfe, 0xb2, 0xbe, 0xfe, 0xb5, 0x4e, + 0x1f, 0xb1, 0x18, 0xa2, 0xcb, 0xeb, 0xef, 0xdf, 0xbe, 0xa6, 0xc1, 0xa7, 0xd9, 0xef, 0x88, 0xfe, + 0x7c, 0x33, 0xa5, 0xdc, 0xbf, 0xfb, 0x17, 0x00, 0x00, 0xff, 0xff, 0x36, 0xf9, 0x0d, 0xa6, 0x14, + 0x03, 0x00, 0x00, } diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/hook.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/hook.pb.go new file mode 100644 index 00000000000..00fa5c18857 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/proto/hapi/release/hook.pb.go @@ -0,0 +1,231 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: hapi/release/hook.proto + +/* +Package release is a generated protocol buffer package. + +It is generated from these files: + hapi/release/hook.proto + hapi/release/info.proto + hapi/release/release.proto + hapi/release/status.proto + hapi/release/test_run.proto + hapi/release/test_suite.proto + +It has these top-level messages: + Hook + Info + Release + Status + TestRun + TestSuite +*/ +package release + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Hook_Event int32 + +const ( + Hook_UNKNOWN Hook_Event = 0 + Hook_PRE_INSTALL Hook_Event = 1 + Hook_POST_INSTALL Hook_Event = 2 + Hook_PRE_DELETE Hook_Event = 3 + Hook_POST_DELETE Hook_Event = 4 + Hook_PRE_UPGRADE Hook_Event = 5 + Hook_POST_UPGRADE Hook_Event = 6 + Hook_PRE_ROLLBACK Hook_Event = 7 + Hook_POST_ROLLBACK Hook_Event = 8 + Hook_RELEASE_TEST_SUCCESS Hook_Event = 9 + Hook_RELEASE_TEST_FAILURE Hook_Event = 10 +) + +var Hook_Event_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PRE_INSTALL", + 2: "POST_INSTALL", + 3: "PRE_DELETE", + 4: "POST_DELETE", + 5: "PRE_UPGRADE", + 6: "POST_UPGRADE", + 7: "PRE_ROLLBACK", + 8: "POST_ROLLBACK", + 9: "RELEASE_TEST_SUCCESS", + 10: "RELEASE_TEST_FAILURE", +} +var Hook_Event_value = map[string]int32{ + "UNKNOWN": 0, + "PRE_INSTALL": 1, + "POST_INSTALL": 2, + "PRE_DELETE": 3, + "POST_DELETE": 4, + "PRE_UPGRADE": 5, + "POST_UPGRADE": 6, + "PRE_ROLLBACK": 7, + "POST_ROLLBACK": 8, + "RELEASE_TEST_SUCCESS": 9, + "RELEASE_TEST_FAILURE": 10, +} + +func (x Hook_Event) String() string { + return proto.EnumName(Hook_Event_name, int32(x)) +} +func (Hook_Event) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} } + +type Hook_DeletePolicy int32 + +const ( + Hook_SUCCEEDED Hook_DeletePolicy = 0 + Hook_FAILED Hook_DeletePolicy = 1 + Hook_BEFORE_HOOK_CREATION Hook_DeletePolicy = 2 +) + +var Hook_DeletePolicy_name = map[int32]string{ + 0: "SUCCEEDED", + 1: "FAILED", + 2: "BEFORE_HOOK_CREATION", +} +var Hook_DeletePolicy_value = map[string]int32{ + "SUCCEEDED": 0, + "FAILED": 1, + "BEFORE_HOOK_CREATION": 2, +} + +func (x Hook_DeletePolicy) String() string { + return proto.EnumName(Hook_DeletePolicy_name, int32(x)) +} +func (Hook_DeletePolicy) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 1} } + +// Hook defines a hook object. +type Hook struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Kind is the Kubernetes kind. + Kind string `protobuf:"bytes,2,opt,name=kind" json:"kind,omitempty"` + // Path is the chart-relative path to the template. + Path string `protobuf:"bytes,3,opt,name=path" json:"path,omitempty"` + // Manifest is the manifest contents. + Manifest string `protobuf:"bytes,4,opt,name=manifest" json:"manifest,omitempty"` + // Events are the events that this hook fires on. + Events []Hook_Event `protobuf:"varint,5,rep,packed,name=events,enum=hapi.release.Hook_Event" json:"events,omitempty"` + // LastRun indicates the date/time this was last run. 
+ LastRun *google_protobuf.Timestamp `protobuf:"bytes,6,opt,name=last_run,json=lastRun" json:"last_run,omitempty"` + // Weight indicates the sort order for execution among similar Hook type + Weight int32 `protobuf:"varint,7,opt,name=weight" json:"weight,omitempty"` + // DeletePolicies are the policies that indicate when to delete the hook + DeletePolicies []Hook_DeletePolicy `protobuf:"varint,8,rep,packed,name=delete_policies,json=deletePolicies,enum=hapi.release.Hook_DeletePolicy" json:"delete_policies,omitempty"` +} + +func (m *Hook) Reset() { *m = Hook{} } +func (m *Hook) String() string { return proto.CompactTextString(m) } +func (*Hook) ProtoMessage() {} +func (*Hook) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Hook) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Hook) GetKind() string { + if m != nil { + return m.Kind + } + return "" +} + +func (m *Hook) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *Hook) GetManifest() string { + if m != nil { + return m.Manifest + } + return "" +} + +func (m *Hook) GetEvents() []Hook_Event { + if m != nil { + return m.Events + } + return nil +} + +func (m *Hook) GetLastRun() *google_protobuf.Timestamp { + if m != nil { + return m.LastRun + } + return nil +} + +func (m *Hook) GetWeight() int32 { + if m != nil { + return m.Weight + } + return 0 +} + +func (m *Hook) GetDeletePolicies() []Hook_DeletePolicy { + if m != nil { + return m.DeletePolicies + } + return nil +} + +func init() { + proto.RegisterType((*Hook)(nil), "hapi.release.Hook") + proto.RegisterEnum("hapi.release.Hook_Event", Hook_Event_name, Hook_Event_value) + proto.RegisterEnum("hapi.release.Hook_DeletePolicy", Hook_DeletePolicy_name, Hook_DeletePolicy_value) +} + +func init() { proto.RegisterFile("hapi/release/hook.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 445 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0x51, 0x8f, 0x9a, 0x40, + 0x10, 0x80, 0x8f, 0x13, 0x41, 0x47, 0xcf, 0xdb, 0x6e, 0x9a, 0x76, 0xe3, 0xcb, 0x19, 0x9f, 0x7c, + 0xc2, 0xe6, 0x9a, 0xfe, 0x00, 0x84, 0xb9, 0x6a, 0x24, 0x60, 0x16, 0x4c, 0x93, 0xbe, 0x10, 0xae, + 0xee, 0x29, 0x11, 0x81, 0x08, 0xb6, 0xe9, 0x0f, 0xec, 0x3f, 0xe8, 0x0f, 0x6a, 0x76, 0x45, 0x7b, + 0x49, 0xfb, 0x36, 0xf3, 0xcd, 0x37, 0xc3, 0x0c, 0x0b, 0xef, 0x77, 0x49, 0x99, 0x4e, 0x8f, 0x22, + 0x13, 0x49, 0x25, 0xa6, 0xbb, 0xa2, 0xd8, 0x5b, 0xe5, 0xb1, 0xa8, 0x0b, 0xda, 0x97, 0x05, 0xab, + 0x29, 0x0c, 0x1f, 0xb6, 0x45, 0xb1, 0xcd, 0xc4, 0x54, 0xd5, 0x9e, 0x4f, 0x2f, 0xd3, 0x3a, 0x3d, + 0x88, 0xaa, 0x4e, 0x0e, 0xe5, 0x59, 0x1f, 0xff, 0xd2, 0x41, 0x9f, 0x17, 0xc5, 0x9e, 0x52, 0xd0, + 0xf3, 0xe4, 0x20, 0x98, 0x36, 0xd2, 0x26, 0x5d, 0xae, 0x62, 0xc9, 0xf6, 0x69, 0xbe, 0x61, 0xb7, + 0x67, 0x26, 0x63, 0xc9, 0xca, 0xa4, 0xde, 0xb1, 0xd6, 0x99, 0xc9, 0x98, 0x0e, 0xa1, 0x73, 0x48, + 0xf2, 0xf4, 0x45, 0x54, 0x35, 0xd3, 0x15, 0xbf, 0xe6, 0xf4, 0x03, 0x18, 0xe2, 0xbb, 0xc8, 0xeb, + 0x8a, 0xb5, 0x47, 0xad, 0xc9, 0xe0, 0x91, 0x59, 0xaf, 0x17, 0xb4, 0xe4, 0xb7, 0x2d, 0x94, 0x02, + 0x6f, 0x3c, 0xfa, 0x09, 0x3a, 0x59, 0x52, 0xd5, 0xf1, 0xf1, 0x94, 0x33, 0x63, 0xa4, 0x4d, 0x7a, + 0x8f, 0x43, 0xeb, 0x7c, 0x86, 0x75, 0x39, 0xc3, 0x8a, 0x2e, 0x67, 0x70, 0x53, 0xba, 0xfc, 0x94, + 0xd3, 0x77, 0x60, 0xfc, 0x10, 0xe9, 0x76, 0x57, 0x33, 0x73, 0xa4, 0x4d, 0xda, 0xbc, 0xc9, 0xe8, + 0x1c, 0xee, 0x37, 0x22, 0x13, 0xb5, 0x88, 0xcb, 0x22, 0x4b, 0xbf, 0xa5, 0xa2, 0x62, 0x1d, 0xb5, + 0xc9, 0xc3, 0x7f, 0x36, 
0x71, 0x95, 0xb9, 0x92, 0xe2, 0x4f, 0x3e, 0xd8, 0xfc, 0xcd, 0x52, 0x51, + 0x8d, 0x7f, 0x6b, 0xd0, 0x56, 0xab, 0xd2, 0x1e, 0x98, 0x6b, 0x7f, 0xe9, 0x07, 0x5f, 0x7c, 0x72, + 0x43, 0xef, 0xa1, 0xb7, 0xe2, 0x18, 0x2f, 0xfc, 0x30, 0xb2, 0x3d, 0x8f, 0x68, 0x94, 0x40, 0x7f, + 0x15, 0x84, 0xd1, 0x95, 0xdc, 0xd2, 0x01, 0x80, 0x54, 0x5c, 0xf4, 0x30, 0x42, 0xd2, 0x52, 0x2d, + 0xd2, 0x68, 0x80, 0x7e, 0x99, 0xb1, 0x5e, 0x7d, 0xe6, 0xb6, 0x8b, 0xa4, 0x7d, 0x9d, 0x71, 0x21, + 0x86, 0x22, 0x1c, 0x63, 0x1e, 0x78, 0xde, 0xcc, 0x76, 0x96, 0xc4, 0xa4, 0x6f, 0xe0, 0x4e, 0x39, + 0x57, 0xd4, 0xa1, 0x0c, 0xde, 0x72, 0xf4, 0xd0, 0x0e, 0x31, 0x8e, 0x30, 0x8c, 0xe2, 0x70, 0xed, + 0x38, 0x18, 0x86, 0xa4, 0xfb, 0x4f, 0xe5, 0xc9, 0x5e, 0x78, 0x6b, 0x8e, 0x04, 0xc6, 0x0e, 0xf4, + 0x5f, 0x9f, 0x4d, 0xef, 0xa0, 0xab, 0xda, 0xd0, 0x45, 0x97, 0xdc, 0x50, 0x00, 0x43, 0xba, 0xe8, + 0x12, 0x4d, 0x0e, 0x99, 0xe1, 0x53, 0xc0, 0x31, 0x9e, 0x07, 0xc1, 0x32, 0x76, 0x38, 0xda, 0xd1, + 0x22, 0xf0, 0xc9, 0xed, 0xac, 0xfb, 0xd5, 0x6c, 0x7e, 0xe4, 0xb3, 0xa1, 0x5e, 0xe9, 0xe3, 0x9f, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x13, 0x64, 0x75, 0x6c, 0xa3, 0x02, 0x00, 0x00, +} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/info.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/info.pb.go new file mode 100644 index 00000000000..7a7ccdd7467 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/proto/hapi/release/info.pb.go @@ -0,0 +1,90 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: hapi/release/info.proto + +package release + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Info describes release information. +type Info struct { + Status *Status `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` + FirstDeployed *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=first_deployed,json=firstDeployed" json:"first_deployed,omitempty"` + LastDeployed *google_protobuf.Timestamp `protobuf:"bytes,3,opt,name=last_deployed,json=lastDeployed" json:"last_deployed,omitempty"` + // Deleted tracks when this object was deleted. + Deleted *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=deleted" json:"deleted,omitempty"` + // Description is human-friendly "log entry" about this release. 
+ Description string `protobuf:"bytes,5,opt,name=Description" json:"Description,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Info) GetStatus() *Status { + if m != nil { + return m.Status + } + return nil +} + +func (m *Info) GetFirstDeployed() *google_protobuf.Timestamp { + if m != nil { + return m.FirstDeployed + } + return nil +} + +func (m *Info) GetLastDeployed() *google_protobuf.Timestamp { + if m != nil { + return m.LastDeployed + } + return nil +} + +func (m *Info) GetDeleted() *google_protobuf.Timestamp { + if m != nil { + return m.Deleted + } + return nil +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func init() { + proto.RegisterType((*Info)(nil), "hapi.release.Info") +} + +func init() { proto.RegisterFile("hapi/release/info.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x31, 0x4f, 0xc3, 0x30, + 0x10, 0x85, 0x95, 0x52, 0x5a, 0xd5, 0x6d, 0x19, 0x2c, 0x24, 0x42, 0x16, 0x22, 0xa6, 0x0e, 0xc8, + 0x91, 0x80, 0x1d, 0x81, 0xba, 0xb0, 0x06, 0x26, 0x16, 0xe4, 0xe2, 0x73, 0xb1, 0xe4, 0xe6, 0x2c, + 0xfb, 0x3a, 0xf0, 0x2f, 0xf8, 0xc9, 0xa8, 0xb6, 0x83, 0xd2, 0xa9, 0xab, 0xbf, 0xf7, 0x3e, 0xbf, + 0x63, 0x57, 0xdf, 0xd2, 0x99, 0xc6, 0x83, 0x05, 0x19, 0xa0, 0x31, 0x9d, 0x46, 0xe1, 0x3c, 0x12, + 0xf2, 0xc5, 0x01, 0x88, 0x0c, 0xaa, 0x9b, 0x2d, 0xe2, 0xd6, 0x42, 0x13, 0xd9, 0x66, 0xaf, 0x1b, + 0x32, 0x3b, 0x08, 0x24, 0x77, 0x2e, 0xc5, 0xab, 0xeb, 0x23, 0x4f, 0x20, 0x49, 0xfb, 0x90, 0xd0, + 0xed, 0xef, 0x88, 0x8d, 0x5f, 0x3b, 0x8d, 0xfc, 0x8e, 0x4d, 0x12, 0x28, 0x8b, 0xba, 0x58, 0xcd, + 0xef, 0x2f, 0xc5, 0xf0, 0x0f, 0xf1, 0x16, 0x59, 0x9b, 0x33, 0xfc, 0x99, 0x5d, 0x68, 0xe3, 0x03, + 0x7d, 0x2a, 0x70, 0x16, 0x7f, 0x40, 0x95, 0xa3, 0xd8, 0xaa, 0x44, 0xda, 0x22, 0xfa, 0x2d, 0xe2, + 0xbd, 0xdf, 0xd2, 0x2e, 0x63, 0x63, 0x9d, 0x0b, 0xfc, 0x89, 0x2d, 0xad, 0x1c, 0x1a, 0xce, 0x4e, + 0x1a, 0x16, 0x87, 0xc2, 0xbf, 0xe0, 0x91, 0x4d, 0x15, 0x58, 0x20, 0x50, 0xe5, 0xf8, 0x64, 0xb5, + 0x8f, 0xf2, 0x9a, 0xcd, 0xd7, 0x10, 0xbe, 0xbc, 0x71, 0x64, 0xb0, 0x2b, 0xcf, 0xeb, 0x62, 0x35, + 0x6b, 0x87, 0x4f, 0x2f, 0xb3, 0x8f, 0x69, 0xbe, 0x7a, 0x33, 0x89, 0xa6, 0x87, 0xbf, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x1a, 0x52, 0x8f, 0x9c, 0x89, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/release.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/release.pb.go new file mode 100644 index 00000000000..511b543d790 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/proto/hapi/release/release.pb.go @@ -0,0 +1,124 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: hapi/release/release.proto + +package release + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import hapi_chart "k8s.io/helm/pkg/proto/hapi/chart" +import hapi_chart3 "k8s.io/helm/pkg/proto/hapi/chart" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Release describes a deployment of a chart, together with the chart +// and the variables used to deploy that chart. 
+type Release struct { + // Name is the name of the release + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Info provides information about a release + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // Chart is the chart that was released. + Chart *hapi_chart3.Chart `protobuf:"bytes,3,opt,name=chart" json:"chart,omitempty"` + // Config is the set of extra Values added to the chart. + // These values override the default values inside of the chart. + Config *hapi_chart.Config `protobuf:"bytes,4,opt,name=config" json:"config,omitempty"` + // Manifest is the string representation of the rendered template. + Manifest string `protobuf:"bytes,5,opt,name=manifest" json:"manifest,omitempty"` + // Hooks are all of the hooks declared for this release. + Hooks []*Hook `protobuf:"bytes,6,rep,name=hooks" json:"hooks,omitempty"` + // Version is an int32 which represents the version of the release. + Version int32 `protobuf:"varint,7,opt,name=version" json:"version,omitempty"` + // Namespace is the kubernetes namespace of the release. + Namespace string `protobuf:"bytes,8,opt,name=namespace" json:"namespace,omitempty"` +} + +func (m *Release) Reset() { *m = Release{} } +func (m *Release) String() string { return proto.CompactTextString(m) } +func (*Release) ProtoMessage() {} +func (*Release) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *Release) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Release) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Release) GetChart() *hapi_chart3.Chart { + if m != nil { + return m.Chart + } + return nil +} + +func (m *Release) GetConfig() *hapi_chart.Config { + if m != nil { + return m.Config + } + return nil +} + +func (m *Release) GetManifest() string { + if m != nil { + return m.Manifest + } + return "" +} + +func (m *Release) GetHooks() []*Hook { + if m != nil { + return m.Hooks + } + return nil +} + +func (m *Release) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *Release) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func init() { + proto.RegisterType((*Release)(nil), "hapi.release.Release") +} + +func init() { proto.RegisterFile("hapi/release/release.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x90, 0xbf, 0x4e, 0xc3, 0x40, + 0x0c, 0xc6, 0x95, 0x36, 0x7f, 0x1a, 0xc3, 0x82, 0x07, 0xb0, 0x22, 0x86, 0x88, 0x01, 0x22, 0x86, + 0x54, 0x82, 0x37, 0x80, 0x05, 0xd6, 0x1b, 0xd9, 0x8e, 0xe8, 0x42, 0x4e, 0xa5, 0xe7, 0x28, 0x17, + 0xf1, 0x2c, 0x3c, 0x2e, 0xba, 0x3f, 0x85, 0x94, 0x2e, 0x4e, 0xec, 0xdf, 0xa7, 0xcf, 0xdf, 0x19, + 0xaa, 0x41, 0x8e, 0x7a, 0x3b, 0xa9, 0x4f, 0x25, 0xad, 0x3a, 0x7c, 0xdb, 0x71, 0xe2, 0x99, 0xf1, + 0xdc, 0xb1, 0x36, 0xce, 0xaa, 0xab, 0x23, 0xe5, 0xc0, 0xbc, 0x0b, 0xb2, 0x7f, 0x40, 0x9b, 0x9e, + 0x8f, 0x40, 0x37, 0xc8, 0x69, 0xde, 0x76, 0x6c, 0x7a, 0xfd, 0x11, 0xc1, 0xe5, 0x12, 0xb8, 0x1a, + 0xe6, 0x37, 0xdf, 0x2b, 0x28, 0x44, 0xf0, 0x41, 0x84, 0xd4, 0xc8, 0xbd, 0xa2, 0xa4, 0x4e, 0x9a, + 0x52, 0xf8, 0x7f, 0xbc, 0x85, 0xd4, 0xd9, 0xd3, 0xaa, 0x4e, 0x9a, 0xb3, 0x07, 0x6c, 0x97, 0xf9, + 0xda, 0x57, 0xd3, 0xb3, 0xf0, 0x1c, 0xef, 0x20, 0xf3, 0xb6, 0xb4, 0xf6, 0xc2, 0x8b, 0x20, 0x0c, + 0x9b, 0x9e, 0x5d, 0x15, 0x81, 0xe3, 0x3d, 0xe4, 0x21, 0x18, 0xa5, 0x4b, 0xcb, 0xa8, 0xf4, 0x44, + 0x44, 0x05, 0x56, 0xb0, 
0xd9, 0x4b, 0xa3, 0x7b, 0x65, 0x67, 0xca, 0x7c, 0xa8, 0xdf, 0x1e, 0x1b, + 0xc8, 0xdc, 0x41, 0x2c, 0xe5, 0xf5, 0xfa, 0x34, 0xd9, 0x0b, 0xf3, 0x4e, 0x04, 0x01, 0x12, 0x14, + 0x5f, 0x6a, 0xb2, 0x9a, 0x0d, 0x15, 0x75, 0xd2, 0x64, 0xe2, 0xd0, 0xe2, 0x35, 0x94, 0xee, 0x91, + 0x76, 0x94, 0x9d, 0xa2, 0x8d, 0x5f, 0xf0, 0x37, 0x78, 0x2a, 0xdf, 0x8a, 0x68, 0xf7, 0x9e, 0xfb, + 0x63, 0x3d, 0xfe, 0x04, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x8f, 0xec, 0x97, 0xbb, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/status.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/status.pb.go new file mode 100644 index 00000000000..284892642f9 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/proto/hapi/release/status.pb.go @@ -0,0 +1,141 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: hapi/release/status.proto + +package release + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Status_Code int32 + +const ( + // Status_UNKNOWN indicates that a release is in an uncertain state. + Status_UNKNOWN Status_Code = 0 + // Status_DEPLOYED indicates that the release has been pushed to Kubernetes. + Status_DEPLOYED Status_Code = 1 + // Status_DELETED indicates that a release has been deleted from Kubermetes. + Status_DELETED Status_Code = 2 + // Status_SUPERSEDED indicates that this release object is outdated and a newer one exists. + Status_SUPERSEDED Status_Code = 3 + // Status_FAILED indicates that the release was not successfully deployed. + Status_FAILED Status_Code = 4 + // Status_DELETING indicates that a delete operation is underway. + Status_DELETING Status_Code = 5 + // Status_PENDING_INSTALL indicates that an install operation is underway. + Status_PENDING_INSTALL Status_Code = 6 + // Status_PENDING_UPGRADE indicates that an upgrade operation is underway. + Status_PENDING_UPGRADE Status_Code = 7 + // Status_PENDING_ROLLBACK indicates that an rollback operation is underway. + Status_PENDING_ROLLBACK Status_Code = 8 +) + +var Status_Code_name = map[int32]string{ + 0: "UNKNOWN", + 1: "DEPLOYED", + 2: "DELETED", + 3: "SUPERSEDED", + 4: "FAILED", + 5: "DELETING", + 6: "PENDING_INSTALL", + 7: "PENDING_UPGRADE", + 8: "PENDING_ROLLBACK", +} +var Status_Code_value = map[string]int32{ + "UNKNOWN": 0, + "DEPLOYED": 1, + "DELETED": 2, + "SUPERSEDED": 3, + "FAILED": 4, + "DELETING": 5, + "PENDING_INSTALL": 6, + "PENDING_UPGRADE": 7, + "PENDING_ROLLBACK": 8, +} + +func (x Status_Code) String() string { + return proto.EnumName(Status_Code_name, int32(x)) +} +func (Status_Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{0, 0} } + +// Status defines the status of a release. +type Status struct { + Code Status_Code `protobuf:"varint,1,opt,name=code,enum=hapi.release.Status_Code" json:"code,omitempty"` + // Cluster resources as kubectl would print them. 
+ Resources string `protobuf:"bytes,3,opt,name=resources" json:"resources,omitempty"` + // Contains the rendered templates/NOTES.txt if available + Notes string `protobuf:"bytes,4,opt,name=notes" json:"notes,omitempty"` + // LastTestSuiteRun provides results on the last test run on a release + LastTestSuiteRun *TestSuite `protobuf:"bytes,5,opt,name=last_test_suite_run,json=lastTestSuiteRun" json:"last_test_suite_run,omitempty"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *Status) GetCode() Status_Code { + if m != nil { + return m.Code + } + return Status_UNKNOWN +} + +func (m *Status) GetResources() string { + if m != nil { + return m.Resources + } + return "" +} + +func (m *Status) GetNotes() string { + if m != nil { + return m.Notes + } + return "" +} + +func (m *Status) GetLastTestSuiteRun() *TestSuite { + if m != nil { + return m.LastTestSuiteRun + } + return nil +} + +func init() { + proto.RegisterType((*Status)(nil), "hapi.release.Status") + proto.RegisterEnum("hapi.release.Status_Code", Status_Code_name, Status_Code_value) +} + +func init() { proto.RegisterFile("hapi/release/status.proto", fileDescriptor3) } + +var fileDescriptor3 = []byte{ + // 333 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x90, 0xd1, 0x6e, 0xa2, 0x40, + 0x14, 0x86, 0x17, 0x45, 0xd4, 0xa3, 0x71, 0x27, 0xa3, 0xc9, 0xa2, 0xd9, 0x4d, 0x8c, 0x57, 0xde, + 0x2c, 0x24, 0xf6, 0x09, 0xd0, 0x19, 0x0d, 0x71, 0x82, 0x04, 0x30, 0x4d, 0x7b, 0x43, 0x50, 0xa7, + 0xd6, 0xc4, 0x30, 0x86, 0x19, 0x2e, 0xfa, 0x26, 0x7d, 0xaa, 0x3e, 0x53, 0x03, 0xd8, 0xa8, 0x97, + 0xff, 0xff, 0x7d, 0x87, 0x73, 0x18, 0x18, 0xbe, 0x27, 0x97, 0x93, 0x9d, 0xf1, 0x33, 0x4f, 0x24, + 0xb7, 0xa5, 0x4a, 0x54, 0x2e, 0xad, 0x4b, 0x26, 0x94, 0xc0, 0xdd, 0x02, 0x59, 0x57, 0x34, 0xfa, + 0xf7, 0x20, 0x2a, 0x2e, 0x55, 0x2c, 0xf3, 0x93, 0xe2, 0x95, 0x3c, 0x1a, 0x1e, 0x85, 0x38, 0x9e, + 0xb9, 0x5d, 0xa6, 0x5d, 0xfe, 0x66, 0x27, 0xe9, 0x47, 0x85, 0x26, 0x5f, 0x35, 0x30, 0xc2, 0xf2, + 0xc3, 0xf8, 0x3f, 0xe8, 0x7b, 0x71, 0xe0, 0xa6, 0x36, 0xd6, 0xa6, 0xbd, 0xd9, 0xd0, 0xba, 0xdf, + 0x60, 0x55, 0x8e, 0xb5, 0x10, 0x07, 0x1e, 0x94, 0x1a, 0xfe, 0x0b, 0xed, 0x8c, 0x4b, 0x91, 0x67, + 0x7b, 0x2e, 0xcd, 0xfa, 0x58, 0x9b, 0xb6, 0x83, 0x5b, 0x81, 0x07, 0xd0, 0x48, 0x85, 0xe2, 0xd2, + 0xd4, 0x4b, 0x52, 0x05, 0xbc, 0x84, 0xfe, 0x39, 0x91, 0x2a, 0xbe, 0x5d, 0x18, 0x67, 0x79, 0x6a, + 0x36, 0xc6, 0xda, 0xb4, 0x33, 0xfb, 0xf3, 0xb8, 0x31, 0xe2, 0x52, 0x85, 0x85, 0x12, 0xa0, 0x62, + 0xe6, 0x16, 0xf3, 0x74, 0xf2, 0xa9, 0x81, 0x5e, 0x9c, 0x82, 0x3b, 0xd0, 0xdc, 0x7a, 0x6b, 0x6f, + 0xf3, 0xec, 0xa1, 0x5f, 0xb8, 0x0b, 0x2d, 0x42, 0x7d, 0xb6, 0x79, 0xa1, 0x04, 0x69, 0x05, 0x22, + 0x94, 0xd1, 0x88, 0x12, 0x54, 0xc3, 0x3d, 0x80, 0x70, 0xeb, 0xd3, 0x20, 0xa4, 0x84, 0x12, 0x54, + 0xc7, 0x00, 0xc6, 0xd2, 0x71, 0x19, 0x25, 0x48, 0xaf, 0xc6, 0x18, 0x8d, 0x5c, 0x6f, 0x85, 0x1a, + 0xb8, 0x0f, 0xbf, 0x7d, 0xea, 0x11, 0xd7, 0x5b, 0xc5, 0xae, 0x17, 0x46, 0x0e, 0x63, 0xc8, 0xb8, + 0x2f, 0xb7, 0xfe, 0x2a, 0x70, 0x08, 0x45, 0x4d, 0x3c, 0x00, 0xf4, 0x53, 0x06, 0x1b, 0xc6, 0xe6, + 0xce, 0x62, 0x8d, 0x5a, 0xf3, 0xf6, 0x6b, 0xf3, 0xfa, 0x07, 0x3b, 0xa3, 0x7c, 0xe2, 0xa7, 0xef, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x09, 0x48, 0x18, 0xba, 0xc7, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/test_run.pb.go 
b/vendor/k8s.io/helm/pkg/proto/hapi/release/test_run.pb.go new file mode 100644 index 00000000000..4d39d17c2b8 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/proto/hapi/release/test_run.pb.go @@ -0,0 +1,118 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: hapi/release/test_run.proto + +package release + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type TestRun_Status int32 + +const ( + TestRun_UNKNOWN TestRun_Status = 0 + TestRun_SUCCESS TestRun_Status = 1 + TestRun_FAILURE TestRun_Status = 2 + TestRun_RUNNING TestRun_Status = 3 +) + +var TestRun_Status_name = map[int32]string{ + 0: "UNKNOWN", + 1: "SUCCESS", + 2: "FAILURE", + 3: "RUNNING", +} +var TestRun_Status_value = map[string]int32{ + "UNKNOWN": 0, + "SUCCESS": 1, + "FAILURE": 2, + "RUNNING": 3, +} + +func (x TestRun_Status) String() string { + return proto.EnumName(TestRun_Status_name, int32(x)) +} +func (TestRun_Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor4, []int{0, 0} } + +type TestRun struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Status TestRun_Status `protobuf:"varint,2,opt,name=status,enum=hapi.release.TestRun_Status" json:"status,omitempty"` + Info string `protobuf:"bytes,3,opt,name=info" json:"info,omitempty"` + StartedAt *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` + CompletedAt *google_protobuf.Timestamp `protobuf:"bytes,5,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"` +} + +func (m *TestRun) Reset() { *m = TestRun{} } +func (m *TestRun) String() string { return proto.CompactTextString(m) } +func (*TestRun) ProtoMessage() {} +func (*TestRun) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func (m *TestRun) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TestRun) GetStatus() TestRun_Status { + if m != nil { + return m.Status + } + return TestRun_UNKNOWN +} + +func (m *TestRun) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +func (m *TestRun) GetStartedAt() *google_protobuf.Timestamp { + if m != nil { + return m.StartedAt + } + return nil +} + +func (m *TestRun) GetCompletedAt() *google_protobuf.Timestamp { + if m != nil { + return m.CompletedAt + } + return nil +} + +func init() { + proto.RegisterType((*TestRun)(nil), "hapi.release.TestRun") + proto.RegisterEnum("hapi.release.TestRun_Status", TestRun_Status_name, TestRun_Status_value) +} + +func init() { proto.RegisterFile("hapi/release/test_run.proto", fileDescriptor4) } + +var fileDescriptor4 = []byte{ + // 274 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0xc1, 0x4b, 0xfb, 0x30, + 0x1c, 0xc5, 0x7f, 0xe9, 0xf6, 0x6b, 0x69, 0x3a, 0xa4, 0xe4, 0x54, 0xa6, 0x60, 0xd9, 0xa9, 0xa7, + 0x14, 0xa6, 0x17, 0x41, 0x0f, 0x75, 0x4c, 0x19, 0x4a, 0x84, 0x74, 0x45, 0xf0, 0x32, 0x32, 0xcd, + 0x66, 0xa1, 0x6d, 0x4a, 0xf3, 0xed, 0xdf, 0xe3, 0xbf, 0x2a, 0x69, 0x33, 0xf1, 0xe6, 0xed, 0xfb, + 0x78, 0x9f, 0xf7, 0xf2, 0x82, 0xcf, 0x3f, 0x45, 0x5b, 0xa6, 0x9d, 0xac, 0xa4, 0xd0, 0x32, 0x05, + 0xa9, 0x61, 0xd7, 0xf5, 0x0d, 0x6d, 0x3b, 0x05, 0x8a, 0xcc, 0x8c, 0x49, 0xad, 0x39, 0xbf, 0x3c, + 0x2a, 0x75, 0xac, 0x64, 0x3a, 0x78, 0xfb, 0xfe, 0x90, 0x42, 0x59, 
0x4b, 0x0d, 0xa2, 0x6e, 0x47, + 0x7c, 0xf1, 0xe5, 0x60, 0x6f, 0x2b, 0x35, 0xf0, 0xbe, 0x21, 0x04, 0x4f, 0x1b, 0x51, 0xcb, 0x08, + 0xc5, 0x28, 0xf1, 0xf9, 0x70, 0x93, 0x6b, 0xec, 0x6a, 0x10, 0xd0, 0xeb, 0xc8, 0x89, 0x51, 0x72, + 0xb6, 0xbc, 0xa0, 0xbf, 0xfb, 0xa9, 0x8d, 0xd2, 0x7c, 0x60, 0xb8, 0x65, 0x4d, 0x53, 0xd9, 0x1c, + 0x54, 0x34, 0x19, 0x9b, 0xcc, 0x4d, 0x6e, 0x30, 0xd6, 0x20, 0x3a, 0x90, 0x1f, 0x3b, 0x01, 0xd1, + 0x34, 0x46, 0x49, 0xb0, 0x9c, 0xd3, 0x71, 0x1f, 0x3d, 0xed, 0xa3, 0xdb, 0xd3, 0x3e, 0xee, 0x5b, + 0x3a, 0x03, 0x72, 0x87, 0x67, 0xef, 0xaa, 0x6e, 0x2b, 0x69, 0xc3, 0xff, 0xff, 0x0c, 0x07, 0x3f, + 0x7c, 0x06, 0x8b, 0x5b, 0xec, 0x8e, 0xfb, 0x48, 0x80, 0xbd, 0x82, 0x3d, 0xb1, 0x97, 0x57, 0x16, + 0xfe, 0x33, 0x22, 0x2f, 0x56, 0xab, 0x75, 0x9e, 0x87, 0xc8, 0x88, 0x87, 0x6c, 0xf3, 0x5c, 0xf0, + 0x75, 0xe8, 0x18, 0xc1, 0x0b, 0xc6, 0x36, 0xec, 0x31, 0x9c, 0xdc, 0xfb, 0x6f, 0x9e, 0xfd, 0xed, + 0xde, 0x1d, 0x5e, 0xba, 0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x31, 0x86, 0x46, 0xdb, 0x81, 0x01, + 0x00, 0x00, +} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/release/test_suite.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/release/test_suite.pb.go new file mode 100644 index 00000000000..b7fa261476d --- /dev/null +++ b/vendor/k8s.io/helm/pkg/proto/hapi/release/test_suite.pb.go @@ -0,0 +1,73 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: hapi/release/test_suite.proto + +package release + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// TestSuite comprises of the last run of the pre-defined test suite of a release version +type TestSuite struct { + // StartedAt indicates the date/time this test suite was kicked off + StartedAt *google_protobuf.Timestamp `protobuf:"bytes,1,opt,name=started_at,json=startedAt" json:"started_at,omitempty"` + // CompletedAt indicates the date/time this test suite was completed + CompletedAt *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=completed_at,json=completedAt" json:"completed_at,omitempty"` + // Results are the results of each segment of the test + Results []*TestRun `protobuf:"bytes,3,rep,name=results" json:"results,omitempty"` +} + +func (m *TestSuite) Reset() { *m = TestSuite{} } +func (m *TestSuite) String() string { return proto.CompactTextString(m) } +func (*TestSuite) ProtoMessage() {} +func (*TestSuite) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +func (m *TestSuite) GetStartedAt() *google_protobuf.Timestamp { + if m != nil { + return m.StartedAt + } + return nil +} + +func (m *TestSuite) GetCompletedAt() *google_protobuf.Timestamp { + if m != nil { + return m.CompletedAt + } + return nil +} + +func (m *TestSuite) GetResults() []*TestRun { + if m != nil { + return m.Results + } + return nil +} + +func init() { + proto.RegisterType((*TestSuite)(nil), "hapi.release.TestSuite") +} + +func init() { proto.RegisterFile("hapi/release/test_suite.proto", fileDescriptor5) } + +var fileDescriptor5 = []byte{ + // 207 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0xc1, 0x4a, 0x86, 0x40, + 0x14, 0x85, 0x31, 0x21, 0x71, 0x74, 0x35, 0x10, 0x88, 0x11, 0x49, 0x2b, 0x57, 0x33, 0x60, 0xab, + 0x16, 0x2d, 0xec, 0x11, 0xcc, 0x55, 0x1b, 0x19, 0xeb, 0x66, 0xc2, 0xe8, 0x0c, 0x73, 0xef, 0xbc, + 0x5a, 0xcf, 0x17, 0xea, 
0x18, 0x41, 0x8b, 0x7f, 0xfd, 0x7d, 0xe7, 0x9c, 0x7b, 0xd9, 0xdd, 0x97, + 0xb2, 0xb3, 0x74, 0xa0, 0x41, 0x21, 0x48, 0x02, 0xa4, 0x01, 0xfd, 0x4c, 0x20, 0xac, 0x33, 0x64, + 0x78, 0xbe, 0x61, 0x11, 0x70, 0x79, 0x3f, 0x19, 0x33, 0x69, 0x90, 0x3b, 0x1b, 0xfd, 0xa7, 0xa4, + 0x79, 0x01, 0x24, 0xb5, 0xd8, 0x43, 0x2f, 0x6f, 0xff, 0xb7, 0x39, 0xbf, 0x1e, 0xf0, 0xe1, 0x3b, + 0x62, 0x69, 0x0f, 0x48, 0xaf, 0x5b, 0x3f, 0x7f, 0x62, 0x0c, 0x49, 0x39, 0x82, 0x8f, 0x41, 0x51, + 0x11, 0x55, 0x51, 0x9d, 0x35, 0xa5, 0x38, 0x06, 0xc4, 0x39, 0x20, 0xfa, 0x73, 0xa0, 0x4b, 0x83, + 0xdd, 0x12, 0x7f, 0x66, 0xf9, 0xbb, 0x59, 0xac, 0x86, 0x10, 0xbe, 0xba, 0x18, 0xce, 0x7e, 0xfd, + 0x96, 0xb8, 0x64, 0x89, 0x03, 0xf4, 0x9a, 0xb0, 0x88, 0xab, 0xb8, 0xce, 0x9a, 0x1b, 0xf1, 0xf7, + 0x4b, 0xb1, 0xdd, 0xd8, 0xf9, 0xb5, 0x3b, 0xad, 0x97, 0xf4, 0x2d, 0x09, 0x6c, 0xbc, 0xde, 0xcb, + 0x1f, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x8c, 0x59, 0x65, 0x4f, 0x37, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/helm/pkg/proto/hapi/services/tiller.pb.go b/vendor/k8s.io/helm/pkg/proto/hapi/services/tiller.pb.go new file mode 100644 index 00000000000..37535aac719 --- /dev/null +++ b/vendor/k8s.io/helm/pkg/proto/hapi/services/tiller.pb.go @@ -0,0 +1,1449 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: hapi/services/tiller.proto + +/* +Package services is a generated protocol buffer package. + +It is generated from these files: + hapi/services/tiller.proto + +It has these top-level messages: + ListReleasesRequest + ListSort + ListReleasesResponse + GetReleaseStatusRequest + GetReleaseStatusResponse + GetReleaseContentRequest + GetReleaseContentResponse + UpdateReleaseRequest + UpdateReleaseResponse + RollbackReleaseRequest + RollbackReleaseResponse + InstallReleaseRequest + InstallReleaseResponse + UninstallReleaseRequest + UninstallReleaseResponse + GetVersionRequest + GetVersionResponse + GetHistoryRequest + GetHistoryResponse + TestReleaseRequest + TestReleaseResponse +*/ +package services + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import hapi_chart3 "k8s.io/helm/pkg/proto/hapi/chart" +import hapi_chart "k8s.io/helm/pkg/proto/hapi/chart" +import hapi_release5 "k8s.io/helm/pkg/proto/hapi/release" +import hapi_release4 "k8s.io/helm/pkg/proto/hapi/release" +import hapi_release1 "k8s.io/helm/pkg/proto/hapi/release" +import hapi_release3 "k8s.io/helm/pkg/proto/hapi/release" +import hapi_version "k8s.io/helm/pkg/proto/hapi/version" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// SortBy defines sort operations. 
+type ListSort_SortBy int32
+
+const (
+	ListSort_UNKNOWN ListSort_SortBy = 0
+	ListSort_NAME ListSort_SortBy = 1
+	ListSort_LAST_RELEASED ListSort_SortBy = 2
+)
+
+var ListSort_SortBy_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "NAME",
+	2: "LAST_RELEASED",
+}
+var ListSort_SortBy_value = map[string]int32{
+	"UNKNOWN": 0,
+	"NAME": 1,
+	"LAST_RELEASED": 2,
+}
+
+func (x ListSort_SortBy) String() string {
+	return proto.EnumName(ListSort_SortBy_name, int32(x))
+}
+func (ListSort_SortBy) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} }
+
+// SortOrder defines sort orders to augment sorting operations.
+type ListSort_SortOrder int32
+
+const (
+	ListSort_ASC ListSort_SortOrder = 0
+	ListSort_DESC ListSort_SortOrder = 1
+)
+
+var ListSort_SortOrder_name = map[int32]string{
+	0: "ASC",
+	1: "DESC",
+}
+var ListSort_SortOrder_value = map[string]int32{
+	"ASC": 0,
+	"DESC": 1,
+}
+
+func (x ListSort_SortOrder) String() string {
+	return proto.EnumName(ListSort_SortOrder_name, int32(x))
+}
+func (ListSort_SortOrder) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 1} }
+
+// ListReleasesRequest requests a list of releases.
+//
+// Releases can be retrieved in chunks by setting limit and offset.
+//
+// Releases can be sorted according to a few pre-determined sort strategies.
+type ListReleasesRequest struct {
+	// Limit is the maximum number of releases to be returned.
+	Limit int64 `protobuf:"varint,1,opt,name=limit" json:"limit,omitempty"`
+	// Offset is the last release name that was seen. The next listing
+	// operation will start with the name after this one.
+	// Example: If list one returns albert, bernie, carl, and sets 'next: dennis',
+	// then dennis is the offset. Supplying 'dennis' for the next request should
+	// cause the next batch to return a set of results starting with 'dennis'.
+	Offset string `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+	// SortBy is the field by which the ListReleases server should sort releases before returning them.
+	SortBy ListSort_SortBy `protobuf:"varint,3,opt,name=sort_by,json=sortBy,enum=hapi.services.tiller.ListSort_SortBy" json:"sort_by,omitempty"`
+	// Filter is a regular expression used to filter which releases should be listed.
+	//
+	// Anything that matches the regexp will be included in the results.
+	Filter string `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"`
+	// SortOrder is the ordering directive used for sorting.
+	SortOrder ListSort_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,enum=hapi.services.tiller.ListSort_SortOrder" json:"sort_order,omitempty"`
+	StatusCodes []hapi_release3.Status_Code `protobuf:"varint,6,rep,packed,name=status_codes,json=statusCodes,enum=hapi.release.Status_Code" json:"status_codes,omitempty"`
+	// Namespace is the filter to select releases only from a specific namespace.
+ Namespace string `protobuf:"bytes,7,opt,name=namespace" json:"namespace,omitempty"` +} + +func (m *ListReleasesRequest) Reset() { *m = ListReleasesRequest{} } +func (m *ListReleasesRequest) String() string { return proto.CompactTextString(m) } +func (*ListReleasesRequest) ProtoMessage() {} +func (*ListReleasesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *ListReleasesRequest) GetLimit() int64 { + if m != nil { + return m.Limit + } + return 0 +} + +func (m *ListReleasesRequest) GetOffset() string { + if m != nil { + return m.Offset + } + return "" +} + +func (m *ListReleasesRequest) GetSortBy() ListSort_SortBy { + if m != nil { + return m.SortBy + } + return ListSort_UNKNOWN +} + +func (m *ListReleasesRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListReleasesRequest) GetSortOrder() ListSort_SortOrder { + if m != nil { + return m.SortOrder + } + return ListSort_ASC +} + +func (m *ListReleasesRequest) GetStatusCodes() []hapi_release3.Status_Code { + if m != nil { + return m.StatusCodes + } + return nil +} + +func (m *ListReleasesRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +// ListSort defines sorting fields on a release list. +type ListSort struct { +} + +func (m *ListSort) Reset() { *m = ListSort{} } +func (m *ListSort) String() string { return proto.CompactTextString(m) } +func (*ListSort) ProtoMessage() {} +func (*ListSort) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// ListReleasesResponse is a list of releases. +type ListReleasesResponse struct { + // Count is the expected total number of releases to be returned. + Count int64 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"` + // Next is the name of the next release. If this is other than an empty + // string, it means there are more results. + Next string `protobuf:"bytes,2,opt,name=next" json:"next,omitempty"` + // Total is the total number of queryable releases. + Total int64 `protobuf:"varint,3,opt,name=total" json:"total,omitempty"` + // Releases is the list of found release objects. + Releases []*hapi_release5.Release `protobuf:"bytes,4,rep,name=releases" json:"releases,omitempty"` +} + +func (m *ListReleasesResponse) Reset() { *m = ListReleasesResponse{} } +func (m *ListReleasesResponse) String() string { return proto.CompactTextString(m) } +func (*ListReleasesResponse) ProtoMessage() {} +func (*ListReleasesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ListReleasesResponse) GetCount() int64 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *ListReleasesResponse) GetNext() string { + if m != nil { + return m.Next + } + return "" +} + +func (m *ListReleasesResponse) GetTotal() int64 { + if m != nil { + return m.Total + } + return 0 +} + +func (m *ListReleasesResponse) GetReleases() []*hapi_release5.Release { + if m != nil { + return m.Releases + } + return nil +} + +// GetReleaseStatusRequest is a request to get the status of a release. 
+type GetReleaseStatusRequest struct { + // Name is the name of the release + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Version is the version of the release + Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` +} + +func (m *GetReleaseStatusRequest) Reset() { *m = GetReleaseStatusRequest{} } +func (m *GetReleaseStatusRequest) String() string { return proto.CompactTextString(m) } +func (*GetReleaseStatusRequest) ProtoMessage() {} +func (*GetReleaseStatusRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *GetReleaseStatusRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetReleaseStatusRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +// GetReleaseStatusResponse is the response indicating the status of the named release. +type GetReleaseStatusResponse struct { + // Name is the name of the release. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Info contains information about the release. + Info *hapi_release4.Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // Namespace the release was released into + Namespace string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` +} + +func (m *GetReleaseStatusResponse) Reset() { *m = GetReleaseStatusResponse{} } +func (m *GetReleaseStatusResponse) String() string { return proto.CompactTextString(m) } +func (*GetReleaseStatusResponse) ProtoMessage() {} +func (*GetReleaseStatusResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *GetReleaseStatusResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetReleaseStatusResponse) GetInfo() *hapi_release4.Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *GetReleaseStatusResponse) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +// GetReleaseContentRequest is a request to get the contents of a release. +type GetReleaseContentRequest struct { + // The name of the release + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Version is the version of the release + Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` +} + +func (m *GetReleaseContentRequest) Reset() { *m = GetReleaseContentRequest{} } +func (m *GetReleaseContentRequest) String() string { return proto.CompactTextString(m) } +func (*GetReleaseContentRequest) ProtoMessage() {} +func (*GetReleaseContentRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *GetReleaseContentRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetReleaseContentRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +// GetReleaseContentResponse is a response containing the contents of a release. 
+type GetReleaseContentResponse struct { + // The release content + Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` +} + +func (m *GetReleaseContentResponse) Reset() { *m = GetReleaseContentResponse{} } +func (m *GetReleaseContentResponse) String() string { return proto.CompactTextString(m) } +func (*GetReleaseContentResponse) ProtoMessage() {} +func (*GetReleaseContentResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *GetReleaseContentResponse) GetRelease() *hapi_release5.Release { + if m != nil { + return m.Release + } + return nil +} + +// UpdateReleaseRequest updates a release. +type UpdateReleaseRequest struct { + // The name of the release + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Chart is the protobuf representation of a chart. + Chart *hapi_chart3.Chart `protobuf:"bytes,2,opt,name=chart" json:"chart,omitempty"` + // Values is a string containing (unparsed) YAML values. + Values *hapi_chart.Config `protobuf:"bytes,3,opt,name=values" json:"values,omitempty"` + // dry_run, if true, will run through the release logic, but neither create + DryRun bool `protobuf:"varint,4,opt,name=dry_run,json=dryRun" json:"dry_run,omitempty"` + // DisableHooks causes the server to skip running any hooks for the upgrade. + DisableHooks bool `protobuf:"varint,5,opt,name=disable_hooks,json=disableHooks" json:"disable_hooks,omitempty"` + // Performs pods restart for resources if applicable + Recreate bool `protobuf:"varint,6,opt,name=recreate" json:"recreate,omitempty"` + // timeout specifies the max amount of time any kubernetes client command can run. + Timeout int64 `protobuf:"varint,7,opt,name=timeout" json:"timeout,omitempty"` + // ResetValues will cause Tiller to ignore stored values, resetting to default values. + ResetValues bool `protobuf:"varint,8,opt,name=reset_values,json=resetValues" json:"reset_values,omitempty"` + // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state + // before marking the release as successful. It will wait for as long as timeout + Wait bool `protobuf:"varint,9,opt,name=wait" json:"wait,omitempty"` + // ReuseValues will cause Tiller to reuse the values from the last release. + // This is ignored if reset_values is set. + ReuseValues bool `protobuf:"varint,10,opt,name=reuse_values,json=reuseValues" json:"reuse_values,omitempty"` + // Force resource update through delete/recreate if needed. 
+ Force bool `protobuf:"varint,11,opt,name=force" json:"force,omitempty"` +} + +func (m *UpdateReleaseRequest) Reset() { *m = UpdateReleaseRequest{} } +func (m *UpdateReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateReleaseRequest) ProtoMessage() {} +func (*UpdateReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *UpdateReleaseRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UpdateReleaseRequest) GetChart() *hapi_chart3.Chart { + if m != nil { + return m.Chart + } + return nil +} + +func (m *UpdateReleaseRequest) GetValues() *hapi_chart.Config { + if m != nil { + return m.Values + } + return nil +} + +func (m *UpdateReleaseRequest) GetDryRun() bool { + if m != nil { + return m.DryRun + } + return false +} + +func (m *UpdateReleaseRequest) GetDisableHooks() bool { + if m != nil { + return m.DisableHooks + } + return false +} + +func (m *UpdateReleaseRequest) GetRecreate() bool { + if m != nil { + return m.Recreate + } + return false +} + +func (m *UpdateReleaseRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *UpdateReleaseRequest) GetResetValues() bool { + if m != nil { + return m.ResetValues + } + return false +} + +func (m *UpdateReleaseRequest) GetWait() bool { + if m != nil { + return m.Wait + } + return false +} + +func (m *UpdateReleaseRequest) GetReuseValues() bool { + if m != nil { + return m.ReuseValues + } + return false +} + +func (m *UpdateReleaseRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +// UpdateReleaseResponse is the response to an update request. +type UpdateReleaseResponse struct { + Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` +} + +func (m *UpdateReleaseResponse) Reset() { *m = UpdateReleaseResponse{} } +func (m *UpdateReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*UpdateReleaseResponse) ProtoMessage() {} +func (*UpdateReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *UpdateReleaseResponse) GetRelease() *hapi_release5.Release { + if m != nil { + return m.Release + } + return nil +} + +type RollbackReleaseRequest struct { + // The name of the release + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // dry_run, if true, will run through the release logic but no create + DryRun bool `protobuf:"varint,2,opt,name=dry_run,json=dryRun" json:"dry_run,omitempty"` + // DisableHooks causes the server to skip running any hooks for the rollback + DisableHooks bool `protobuf:"varint,3,opt,name=disable_hooks,json=disableHooks" json:"disable_hooks,omitempty"` + // Version is the version of the release to deploy. + Version int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"` + // Performs pods restart for resources if applicable + Recreate bool `protobuf:"varint,5,opt,name=recreate" json:"recreate,omitempty"` + // timeout specifies the max amount of time any kubernetes client command can run. + Timeout int64 `protobuf:"varint,6,opt,name=timeout" json:"timeout,omitempty"` + // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state + // before marking the release as successful. It will wait for as long as timeout + Wait bool `protobuf:"varint,7,opt,name=wait" json:"wait,omitempty"` + // Force resource update through delete/recreate if needed. 
+ Force bool `protobuf:"varint,8,opt,name=force" json:"force,omitempty"` +} + +func (m *RollbackReleaseRequest) Reset() { *m = RollbackReleaseRequest{} } +func (m *RollbackReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*RollbackReleaseRequest) ProtoMessage() {} +func (*RollbackReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *RollbackReleaseRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *RollbackReleaseRequest) GetDryRun() bool { + if m != nil { + return m.DryRun + } + return false +} + +func (m *RollbackReleaseRequest) GetDisableHooks() bool { + if m != nil { + return m.DisableHooks + } + return false +} + +func (m *RollbackReleaseRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *RollbackReleaseRequest) GetRecreate() bool { + if m != nil { + return m.Recreate + } + return false +} + +func (m *RollbackReleaseRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *RollbackReleaseRequest) GetWait() bool { + if m != nil { + return m.Wait + } + return false +} + +func (m *RollbackReleaseRequest) GetForce() bool { + if m != nil { + return m.Force + } + return false +} + +// RollbackReleaseResponse is the response to a rollback request. +type RollbackReleaseResponse struct { + Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` +} + +func (m *RollbackReleaseResponse) Reset() { *m = RollbackReleaseResponse{} } +func (m *RollbackReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*RollbackReleaseResponse) ProtoMessage() {} +func (*RollbackReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *RollbackReleaseResponse) GetRelease() *hapi_release5.Release { + if m != nil { + return m.Release + } + return nil +} + +// InstallReleaseRequest is the request for an installation of a chart. +type InstallReleaseRequest struct { + // Chart is the protobuf representation of a chart. + Chart *hapi_chart3.Chart `protobuf:"bytes,1,opt,name=chart" json:"chart,omitempty"` + // Values is a string containing (unparsed) YAML values. + Values *hapi_chart.Config `protobuf:"bytes,2,opt,name=values" json:"values,omitempty"` + // DryRun, if true, will run through the release logic, but neither create + // a release object nor deploy to Kubernetes. The release object returned + // in the response will be fake. + DryRun bool `protobuf:"varint,3,opt,name=dry_run,json=dryRun" json:"dry_run,omitempty"` + // Name is the candidate release name. This must be unique to the + // namespace, otherwise the server will return an error. If it is not + // supplied, the server will autogenerate one. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // DisableHooks causes the server to skip running any hooks for the install. + DisableHooks bool `protobuf:"varint,5,opt,name=disable_hooks,json=disableHooks" json:"disable_hooks,omitempty"` + // Namespace is the kubernetes namespace of the release. + Namespace string `protobuf:"bytes,6,opt,name=namespace" json:"namespace,omitempty"` + // ReuseName requests that Tiller re-uses a name, instead of erroring out. + ReuseName bool `protobuf:"varint,7,opt,name=reuse_name,json=reuseName" json:"reuse_name,omitempty"` + // timeout specifies the max amount of time any kubernetes client command can run. 
+ Timeout int64 `protobuf:"varint,8,opt,name=timeout" json:"timeout,omitempty"` + // wait, if true, will wait until all Pods, PVCs, and Services are in a ready state + // before marking the release as successful. It will wait for as long as timeout + Wait bool `protobuf:"varint,9,opt,name=wait" json:"wait,omitempty"` +} + +func (m *InstallReleaseRequest) Reset() { *m = InstallReleaseRequest{} } +func (m *InstallReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*InstallReleaseRequest) ProtoMessage() {} +func (*InstallReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *InstallReleaseRequest) GetChart() *hapi_chart3.Chart { + if m != nil { + return m.Chart + } + return nil +} + +func (m *InstallReleaseRequest) GetValues() *hapi_chart.Config { + if m != nil { + return m.Values + } + return nil +} + +func (m *InstallReleaseRequest) GetDryRun() bool { + if m != nil { + return m.DryRun + } + return false +} + +func (m *InstallReleaseRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InstallReleaseRequest) GetDisableHooks() bool { + if m != nil { + return m.DisableHooks + } + return false +} + +func (m *InstallReleaseRequest) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *InstallReleaseRequest) GetReuseName() bool { + if m != nil { + return m.ReuseName + } + return false +} + +func (m *InstallReleaseRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *InstallReleaseRequest) GetWait() bool { + if m != nil { + return m.Wait + } + return false +} + +// InstallReleaseResponse is the response from a release installation. +type InstallReleaseResponse struct { + Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` +} + +func (m *InstallReleaseResponse) Reset() { *m = InstallReleaseResponse{} } +func (m *InstallReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*InstallReleaseResponse) ProtoMessage() {} +func (*InstallReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *InstallReleaseResponse) GetRelease() *hapi_release5.Release { + if m != nil { + return m.Release + } + return nil +} + +// UninstallReleaseRequest represents a request to uninstall a named release. +type UninstallReleaseRequest struct { + // Name is the name of the release to delete. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // DisableHooks causes the server to skip running any hooks for the uninstall. + DisableHooks bool `protobuf:"varint,2,opt,name=disable_hooks,json=disableHooks" json:"disable_hooks,omitempty"` + // Purge removes the release from the store and makes its name free for later use. + Purge bool `protobuf:"varint,3,opt,name=purge" json:"purge,omitempty"` + // timeout specifies the max amount of time any kubernetes client command can run. 
+ Timeout int64 `protobuf:"varint,4,opt,name=timeout" json:"timeout,omitempty"` +} + +func (m *UninstallReleaseRequest) Reset() { *m = UninstallReleaseRequest{} } +func (m *UninstallReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*UninstallReleaseRequest) ProtoMessage() {} +func (*UninstallReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *UninstallReleaseRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UninstallReleaseRequest) GetDisableHooks() bool { + if m != nil { + return m.DisableHooks + } + return false +} + +func (m *UninstallReleaseRequest) GetPurge() bool { + if m != nil { + return m.Purge + } + return false +} + +func (m *UninstallReleaseRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +// UninstallReleaseResponse represents a successful response to an uninstall request. +type UninstallReleaseResponse struct { + // Release is the release that was marked deleted. + Release *hapi_release5.Release `protobuf:"bytes,1,opt,name=release" json:"release,omitempty"` + // Info is an uninstall message + Info string `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` +} + +func (m *UninstallReleaseResponse) Reset() { *m = UninstallReleaseResponse{} } +func (m *UninstallReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*UninstallReleaseResponse) ProtoMessage() {} +func (*UninstallReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *UninstallReleaseResponse) GetRelease() *hapi_release5.Release { + if m != nil { + return m.Release + } + return nil +} + +func (m *UninstallReleaseResponse) GetInfo() string { + if m != nil { + return m.Info + } + return "" +} + +// GetVersionRequest requests for version information. +type GetVersionRequest struct { +} + +func (m *GetVersionRequest) Reset() { *m = GetVersionRequest{} } +func (m *GetVersionRequest) String() string { return proto.CompactTextString(m) } +func (*GetVersionRequest) ProtoMessage() {} +func (*GetVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +type GetVersionResponse struct { + Version *hapi_version.Version `protobuf:"bytes,1,opt,name=Version" json:"Version,omitempty"` +} + +func (m *GetVersionResponse) Reset() { *m = GetVersionResponse{} } +func (m *GetVersionResponse) String() string { return proto.CompactTextString(m) } +func (*GetVersionResponse) ProtoMessage() {} +func (*GetVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *GetVersionResponse) GetVersion() *hapi_version.Version { + if m != nil { + return m.Version + } + return nil +} + +// GetHistoryRequest requests a release's history. +type GetHistoryRequest struct { + // The name of the release. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The maximum number of releases to include. 
+ Max int32 `protobuf:"varint,2,opt,name=max" json:"max,omitempty"` +} + +func (m *GetHistoryRequest) Reset() { *m = GetHistoryRequest{} } +func (m *GetHistoryRequest) String() string { return proto.CompactTextString(m) } +func (*GetHistoryRequest) ProtoMessage() {} +func (*GetHistoryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *GetHistoryRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetHistoryRequest) GetMax() int32 { + if m != nil { + return m.Max + } + return 0 +} + +// GetHistoryResponse is received in response to a GetHistory rpc. +type GetHistoryResponse struct { + Releases []*hapi_release5.Release `protobuf:"bytes,1,rep,name=releases" json:"releases,omitempty"` +} + +func (m *GetHistoryResponse) Reset() { *m = GetHistoryResponse{} } +func (m *GetHistoryResponse) String() string { return proto.CompactTextString(m) } +func (*GetHistoryResponse) ProtoMessage() {} +func (*GetHistoryResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *GetHistoryResponse) GetReleases() []*hapi_release5.Release { + if m != nil { + return m.Releases + } + return nil +} + +// TestReleaseRequest is a request to get the status of a release. +type TestReleaseRequest struct { + // Name is the name of the release + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // timeout specifies the max amount of time any kubernetes client command can run. + Timeout int64 `protobuf:"varint,2,opt,name=timeout" json:"timeout,omitempty"` + // cleanup specifies whether or not to attempt pod deletion after test completes + Cleanup bool `protobuf:"varint,3,opt,name=cleanup" json:"cleanup,omitempty"` +} + +func (m *TestReleaseRequest) Reset() { *m = TestReleaseRequest{} } +func (m *TestReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*TestReleaseRequest) ProtoMessage() {} +func (*TestReleaseRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *TestReleaseRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *TestReleaseRequest) GetTimeout() int64 { + if m != nil { + return m.Timeout + } + return 0 +} + +func (m *TestReleaseRequest) GetCleanup() bool { + if m != nil { + return m.Cleanup + } + return false +} + +// TestReleaseResponse represents a message from executing a test +type TestReleaseResponse struct { + Msg string `protobuf:"bytes,1,opt,name=msg" json:"msg,omitempty"` + Status hapi_release1.TestRun_Status `protobuf:"varint,2,opt,name=status,enum=hapi.release.TestRun_Status" json:"status,omitempty"` +} + +func (m *TestReleaseResponse) Reset() { *m = TestReleaseResponse{} } +func (m *TestReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*TestReleaseResponse) ProtoMessage() {} +func (*TestReleaseResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *TestReleaseResponse) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +func (m *TestReleaseResponse) GetStatus() hapi_release1.TestRun_Status { + if m != nil { + return m.Status + } + return hapi_release1.TestRun_UNKNOWN +} + +func init() { + proto.RegisterType((*ListReleasesRequest)(nil), "hapi.services.tiller.ListReleasesRequest") + proto.RegisterType((*ListSort)(nil), "hapi.services.tiller.ListSort") + proto.RegisterType((*ListReleasesResponse)(nil), "hapi.services.tiller.ListReleasesResponse") + proto.RegisterType((*GetReleaseStatusRequest)(nil), 
"hapi.services.tiller.GetReleaseStatusRequest") + proto.RegisterType((*GetReleaseStatusResponse)(nil), "hapi.services.tiller.GetReleaseStatusResponse") + proto.RegisterType((*GetReleaseContentRequest)(nil), "hapi.services.tiller.GetReleaseContentRequest") + proto.RegisterType((*GetReleaseContentResponse)(nil), "hapi.services.tiller.GetReleaseContentResponse") + proto.RegisterType((*UpdateReleaseRequest)(nil), "hapi.services.tiller.UpdateReleaseRequest") + proto.RegisterType((*UpdateReleaseResponse)(nil), "hapi.services.tiller.UpdateReleaseResponse") + proto.RegisterType((*RollbackReleaseRequest)(nil), "hapi.services.tiller.RollbackReleaseRequest") + proto.RegisterType((*RollbackReleaseResponse)(nil), "hapi.services.tiller.RollbackReleaseResponse") + proto.RegisterType((*InstallReleaseRequest)(nil), "hapi.services.tiller.InstallReleaseRequest") + proto.RegisterType((*InstallReleaseResponse)(nil), "hapi.services.tiller.InstallReleaseResponse") + proto.RegisterType((*UninstallReleaseRequest)(nil), "hapi.services.tiller.UninstallReleaseRequest") + proto.RegisterType((*UninstallReleaseResponse)(nil), "hapi.services.tiller.UninstallReleaseResponse") + proto.RegisterType((*GetVersionRequest)(nil), "hapi.services.tiller.GetVersionRequest") + proto.RegisterType((*GetVersionResponse)(nil), "hapi.services.tiller.GetVersionResponse") + proto.RegisterType((*GetHistoryRequest)(nil), "hapi.services.tiller.GetHistoryRequest") + proto.RegisterType((*GetHistoryResponse)(nil), "hapi.services.tiller.GetHistoryResponse") + proto.RegisterType((*TestReleaseRequest)(nil), "hapi.services.tiller.TestReleaseRequest") + proto.RegisterType((*TestReleaseResponse)(nil), "hapi.services.tiller.TestReleaseResponse") + proto.RegisterEnum("hapi.services.tiller.ListSort_SortBy", ListSort_SortBy_name, ListSort_SortBy_value) + proto.RegisterEnum("hapi.services.tiller.ListSort_SortOrder", ListSort_SortOrder_name, ListSort_SortOrder_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ReleaseService service + +type ReleaseServiceClient interface { + // ListReleases retrieves release history. + // TODO: Allow filtering the set of releases by + // release status. By default, ListAllReleases returns the releases who + // current status is "Active". + ListReleases(ctx context.Context, in *ListReleasesRequest, opts ...grpc.CallOption) (ReleaseService_ListReleasesClient, error) + // GetReleasesStatus retrieves status information for the specified release. + GetReleaseStatus(ctx context.Context, in *GetReleaseStatusRequest, opts ...grpc.CallOption) (*GetReleaseStatusResponse, error) + // GetReleaseContent retrieves the release content (chart + value) for the specified release. + GetReleaseContent(ctx context.Context, in *GetReleaseContentRequest, opts ...grpc.CallOption) (*GetReleaseContentResponse, error) + // UpdateRelease updates release content. + UpdateRelease(ctx context.Context, in *UpdateReleaseRequest, opts ...grpc.CallOption) (*UpdateReleaseResponse, error) + // InstallRelease requests installation of a chart as a new release. + InstallRelease(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*InstallReleaseResponse, error) + // UninstallRelease requests deletion of a named release. 
+ UninstallRelease(ctx context.Context, in *UninstallReleaseRequest, opts ...grpc.CallOption) (*UninstallReleaseResponse, error) + // GetVersion returns the current version of the server. + GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) + // RollbackRelease rolls back a release to a previous version. + RollbackRelease(ctx context.Context, in *RollbackReleaseRequest, opts ...grpc.CallOption) (*RollbackReleaseResponse, error) + // ReleaseHistory retrieves a releasse's history. + GetHistory(ctx context.Context, in *GetHistoryRequest, opts ...grpc.CallOption) (*GetHistoryResponse, error) + // RunReleaseTest executes the tests defined of a named release + RunReleaseTest(ctx context.Context, in *TestReleaseRequest, opts ...grpc.CallOption) (ReleaseService_RunReleaseTestClient, error) +} + +type releaseServiceClient struct { + cc *grpc.ClientConn +} + +func NewReleaseServiceClient(cc *grpc.ClientConn) ReleaseServiceClient { + return &releaseServiceClient{cc} +} + +func (c *releaseServiceClient) ListReleases(ctx context.Context, in *ListReleasesRequest, opts ...grpc.CallOption) (ReleaseService_ListReleasesClient, error) { + stream, err := grpc.NewClientStream(ctx, &_ReleaseService_serviceDesc.Streams[0], c.cc, "/hapi.services.tiller.ReleaseService/ListReleases", opts...) + if err != nil { + return nil, err + } + x := &releaseServiceListReleasesClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ReleaseService_ListReleasesClient interface { + Recv() (*ListReleasesResponse, error) + grpc.ClientStream +} + +type releaseServiceListReleasesClient struct { + grpc.ClientStream +} + +func (x *releaseServiceListReleasesClient) Recv() (*ListReleasesResponse, error) { + m := new(ListReleasesResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *releaseServiceClient) GetReleaseStatus(ctx context.Context, in *GetReleaseStatusRequest, opts ...grpc.CallOption) (*GetReleaseStatusResponse, error) { + out := new(GetReleaseStatusResponse) + err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetReleaseStatus", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *releaseServiceClient) GetReleaseContent(ctx context.Context, in *GetReleaseContentRequest, opts ...grpc.CallOption) (*GetReleaseContentResponse, error) { + out := new(GetReleaseContentResponse) + err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetReleaseContent", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *releaseServiceClient) UpdateRelease(ctx context.Context, in *UpdateReleaseRequest, opts ...grpc.CallOption) (*UpdateReleaseResponse, error) { + out := new(UpdateReleaseResponse) + err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/UpdateRelease", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *releaseServiceClient) InstallRelease(ctx context.Context, in *InstallReleaseRequest, opts ...grpc.CallOption) (*InstallReleaseResponse, error) { + out := new(InstallReleaseResponse) + err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/InstallRelease", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *releaseServiceClient) UninstallRelease(ctx context.Context, in *UninstallReleaseRequest, opts ...grpc.CallOption) (*UninstallReleaseResponse, error) { + out := new(UninstallReleaseResponse) + err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/UninstallRelease", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *releaseServiceClient) GetVersion(ctx context.Context, in *GetVersionRequest, opts ...grpc.CallOption) (*GetVersionResponse, error) { + out := new(GetVersionResponse) + err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetVersion", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *releaseServiceClient) RollbackRelease(ctx context.Context, in *RollbackReleaseRequest, opts ...grpc.CallOption) (*RollbackReleaseResponse, error) { + out := new(RollbackReleaseResponse) + err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/RollbackRelease", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *releaseServiceClient) GetHistory(ctx context.Context, in *GetHistoryRequest, opts ...grpc.CallOption) (*GetHistoryResponse, error) { + out := new(GetHistoryResponse) + err := grpc.Invoke(ctx, "/hapi.services.tiller.ReleaseService/GetHistory", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *releaseServiceClient) RunReleaseTest(ctx context.Context, in *TestReleaseRequest, opts ...grpc.CallOption) (ReleaseService_RunReleaseTestClient, error) { + stream, err := grpc.NewClientStream(ctx, &_ReleaseService_serviceDesc.Streams[1], c.cc, "/hapi.services.tiller.ReleaseService/RunReleaseTest", opts...) + if err != nil { + return nil, err + } + x := &releaseServiceRunReleaseTestClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + if err := x.ClientStream.CloseSend(); err != nil { + return nil, err + } + return x, nil +} + +type ReleaseService_RunReleaseTestClient interface { + Recv() (*TestReleaseResponse, error) + grpc.ClientStream +} + +type releaseServiceRunReleaseTestClient struct { + grpc.ClientStream +} + +func (x *releaseServiceRunReleaseTestClient) Recv() (*TestReleaseResponse, error) { + m := new(TestReleaseResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for ReleaseService service + +type ReleaseServiceServer interface { + // ListReleases retrieves release history. + // TODO: Allow filtering the set of releases by + // release status. By default, ListAllReleases returns the releases who + // current status is "Active". + ListReleases(*ListReleasesRequest, ReleaseService_ListReleasesServer) error + // GetReleasesStatus retrieves status information for the specified release. + GetReleaseStatus(context.Context, *GetReleaseStatusRequest) (*GetReleaseStatusResponse, error) + // GetReleaseContent retrieves the release content (chart + value) for the specified release. + GetReleaseContent(context.Context, *GetReleaseContentRequest) (*GetReleaseContentResponse, error) + // UpdateRelease updates release content. + UpdateRelease(context.Context, *UpdateReleaseRequest) (*UpdateReleaseResponse, error) + // InstallRelease requests installation of a chart as a new release. + InstallRelease(context.Context, *InstallReleaseRequest) (*InstallReleaseResponse, error) + // UninstallRelease requests deletion of a named release. 
+ UninstallRelease(context.Context, *UninstallReleaseRequest) (*UninstallReleaseResponse, error) + // GetVersion returns the current version of the server. + GetVersion(context.Context, *GetVersionRequest) (*GetVersionResponse, error) + // RollbackRelease rolls back a release to a previous version. + RollbackRelease(context.Context, *RollbackReleaseRequest) (*RollbackReleaseResponse, error) + // ReleaseHistory retrieves a releasse's history. + GetHistory(context.Context, *GetHistoryRequest) (*GetHistoryResponse, error) + // RunReleaseTest executes the tests defined of a named release + RunReleaseTest(*TestReleaseRequest, ReleaseService_RunReleaseTestServer) error +} + +func RegisterReleaseServiceServer(s *grpc.Server, srv ReleaseServiceServer) { + s.RegisterService(&_ReleaseService_serviceDesc, srv) +} + +func _ReleaseService_ListReleases_Handler(srv interface{}, stream grpc.ServerStream) error { + m := new(ListReleasesRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ReleaseServiceServer).ListReleases(m, &releaseServiceListReleasesServer{stream}) +} + +type ReleaseService_ListReleasesServer interface { + Send(*ListReleasesResponse) error + grpc.ServerStream +} + +type releaseServiceListReleasesServer struct { + grpc.ServerStream +} + +func (x *releaseServiceListReleasesServer) Send(m *ListReleasesResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _ReleaseService_GetReleaseStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetReleaseStatusRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReleaseServiceServer).GetReleaseStatus(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hapi.services.tiller.ReleaseService/GetReleaseStatus", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReleaseServiceServer).GetReleaseStatus(ctx, req.(*GetReleaseStatusRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReleaseService_GetReleaseContent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetReleaseContentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReleaseServiceServer).GetReleaseContent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hapi.services.tiller.ReleaseService/GetReleaseContent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReleaseServiceServer).GetReleaseContent(ctx, req.(*GetReleaseContentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReleaseService_UpdateRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateReleaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReleaseServiceServer).UpdateRelease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hapi.services.tiller.ReleaseService/UpdateRelease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReleaseServiceServer).UpdateRelease(ctx, req.(*UpdateReleaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_ReleaseService_InstallRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InstallReleaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReleaseServiceServer).InstallRelease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hapi.services.tiller.ReleaseService/InstallRelease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReleaseServiceServer).InstallRelease(ctx, req.(*InstallReleaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReleaseService_UninstallRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UninstallReleaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReleaseServiceServer).UninstallRelease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hapi.services.tiller.ReleaseService/UninstallRelease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReleaseServiceServer).UninstallRelease(ctx, req.(*UninstallReleaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReleaseService_GetVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetVersionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReleaseServiceServer).GetVersion(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hapi.services.tiller.ReleaseService/GetVersion", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReleaseServiceServer).GetVersion(ctx, req.(*GetVersionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReleaseService_RollbackRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RollbackReleaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReleaseServiceServer).RollbackRelease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hapi.services.tiller.ReleaseService/RollbackRelease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReleaseServiceServer).RollbackRelease(ctx, req.(*RollbackReleaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReleaseService_GetHistory_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetHistoryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReleaseServiceServer).GetHistory(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/hapi.services.tiller.ReleaseService/GetHistory", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReleaseServiceServer).GetHistory(ctx, req.(*GetHistoryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ReleaseService_RunReleaseTest_Handler(srv interface{}, stream grpc.ServerStream) error { + m := 
new(TestReleaseRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(ReleaseServiceServer).RunReleaseTest(m, &releaseServiceRunReleaseTestServer{stream}) +} + +type ReleaseService_RunReleaseTestServer interface { + Send(*TestReleaseResponse) error + grpc.ServerStream +} + +type releaseServiceRunReleaseTestServer struct { + grpc.ServerStream +} + +func (x *releaseServiceRunReleaseTestServer) Send(m *TestReleaseResponse) error { + return x.ServerStream.SendMsg(m) +} + +var _ReleaseService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "hapi.services.tiller.ReleaseService", + HandlerType: (*ReleaseServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "GetReleaseStatus", + Handler: _ReleaseService_GetReleaseStatus_Handler, + }, + { + MethodName: "GetReleaseContent", + Handler: _ReleaseService_GetReleaseContent_Handler, + }, + { + MethodName: "UpdateRelease", + Handler: _ReleaseService_UpdateRelease_Handler, + }, + { + MethodName: "InstallRelease", + Handler: _ReleaseService_InstallRelease_Handler, + }, + { + MethodName: "UninstallRelease", + Handler: _ReleaseService_UninstallRelease_Handler, + }, + { + MethodName: "GetVersion", + Handler: _ReleaseService_GetVersion_Handler, + }, + { + MethodName: "RollbackRelease", + Handler: _ReleaseService_RollbackRelease_Handler, + }, + { + MethodName: "GetHistory", + Handler: _ReleaseService_GetHistory_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "ListReleases", + Handler: _ReleaseService_ListReleases_Handler, + ServerStreams: true, + }, + { + StreamName: "RunReleaseTest", + Handler: _ReleaseService_RunReleaseTest_Handler, + ServerStreams: true, + }, + }, + Metadata: "hapi/services/tiller.proto", +} + +func init() { proto.RegisterFile("hapi/services/tiller.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1217 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x57, 0xdd, 0x6e, 0xe3, 0xc4, + 0x17, 0xaf, 0xf3, 0x9d, 0x93, 0x36, 0xff, 0x74, 0x9a, 0xb6, 0xae, 0xff, 0x0b, 0x2a, 0x46, 0xb0, + 0xd9, 0x85, 0x4d, 0x21, 0x70, 0x83, 0x84, 0x90, 0xba, 0xdd, 0xa8, 0x2d, 0x94, 0xae, 0xe4, 0x6c, + 0x17, 0x09, 0x01, 0x91, 0x9b, 0x4c, 0x5a, 0xb3, 0x8e, 0x27, 0x78, 0xc6, 0x65, 0x7b, 0xcb, 0x1d, + 0x8f, 0xc2, 0x5b, 0xf0, 0x1e, 0x5c, 0xc2, 0x83, 0x20, 0xcf, 0x87, 0xeb, 0x49, 0xed, 0xd6, 0xf4, + 0x26, 0x9e, 0x99, 0xf3, 0xfd, 0x3b, 0x67, 0xce, 0x9c, 0x80, 0x75, 0xe9, 0x2e, 0xbc, 0x3d, 0x8a, + 0xc3, 0x2b, 0x6f, 0x82, 0xe9, 0x1e, 0xf3, 0x7c, 0x1f, 0x87, 0xfd, 0x45, 0x48, 0x18, 0x41, 0xdd, + 0x98, 0xd6, 0x57, 0xb4, 0xbe, 0xa0, 0x59, 0x5b, 0x5c, 0x62, 0x72, 0xe9, 0x86, 0x4c, 0xfc, 0x0a, + 0x6e, 0x6b, 0x3b, 0x7d, 0x4e, 0x82, 0x99, 0x77, 0x21, 0x09, 0xc2, 0x44, 0x88, 0x7d, 0xec, 0x52, + 0xac, 0xbe, 0x9a, 0x90, 0xa2, 0x79, 0xc1, 0x8c, 0x48, 0xc2, 0xff, 0x35, 0x02, 0xc3, 0x94, 0x8d, + 0xc3, 0x28, 0x90, 0xc4, 0x1d, 0x8d, 0x48, 0x99, 0xcb, 0x22, 0xaa, 0x19, 0xbb, 0xc2, 0x21, 0xf5, + 0x48, 0xa0, 0xbe, 0x82, 0x66, 0xff, 0x59, 0x82, 0x8d, 0x13, 0x8f, 0x32, 0x47, 0x08, 0x52, 0x07, + 0xff, 0x12, 0x61, 0xca, 0x50, 0x17, 0xaa, 0xbe, 0x37, 0xf7, 0x98, 0x69, 0xec, 0x1a, 0xbd, 0xb2, + 0x23, 0x36, 0x68, 0x0b, 0x6a, 0x64, 0x36, 0xa3, 0x98, 0x99, 0xa5, 0x5d, 0xa3, 0xd7, 0x74, 0xe4, + 0x0e, 0x7d, 0x05, 0x75, 0x4a, 0x42, 0x36, 0x3e, 0xbf, 0x36, 0xcb, 0xbb, 0x46, 0xaf, 0x3d, 0xf8, + 0xa0, 0x9f, 0x85, 0x53, 0x3f, 0xb6, 0x34, 0x22, 0x21, 0xeb, 0xc7, 0x3f, 0xcf, 0xaf, 0x9d, 0x1a, + 0xe5, 0xdf, 0x58, 0xef, 0xcc, 0xf3, 0x19, 0x0e, 0xcd, 0x8a, 0xd0, 0x2b, 0x76, 0xe8, 0x10, 
0x80, + 0xeb, 0x25, 0xe1, 0x14, 0x87, 0x66, 0x95, 0xab, 0xee, 0x15, 0x50, 0xfd, 0x32, 0xe6, 0x77, 0x9a, + 0x54, 0x2d, 0xd1, 0x97, 0xb0, 0x2a, 0x20, 0x19, 0x4f, 0xc8, 0x14, 0x53, 0xb3, 0xb6, 0x5b, 0xee, + 0xb5, 0x07, 0x3b, 0x42, 0x95, 0x82, 0x7f, 0x24, 0x40, 0x3b, 0x20, 0x53, 0xec, 0xb4, 0x04, 0x7b, + 0xbc, 0xa6, 0xe8, 0x11, 0x34, 0x03, 0x77, 0x8e, 0xe9, 0xc2, 0x9d, 0x60, 0xb3, 0xce, 0x3d, 0xbc, + 0x39, 0xb0, 0x7f, 0x82, 0x86, 0x32, 0x6e, 0x0f, 0xa0, 0x26, 0x42, 0x43, 0x2d, 0xa8, 0x9f, 0x9d, + 0x7e, 0x73, 0xfa, 0xf2, 0xbb, 0xd3, 0xce, 0x0a, 0x6a, 0x40, 0xe5, 0x74, 0xff, 0xdb, 0x61, 0xc7, + 0x40, 0xeb, 0xb0, 0x76, 0xb2, 0x3f, 0x7a, 0x35, 0x76, 0x86, 0x27, 0xc3, 0xfd, 0xd1, 0xf0, 0x45, + 0xa7, 0x64, 0xbf, 0x0b, 0xcd, 0xc4, 0x67, 0x54, 0x87, 0xf2, 0xfe, 0xe8, 0x40, 0x88, 0xbc, 0x18, + 0x8e, 0x0e, 0x3a, 0x86, 0xfd, 0xbb, 0x01, 0x5d, 0x3d, 0x45, 0x74, 0x41, 0x02, 0x8a, 0xe3, 0x1c, + 0x4d, 0x48, 0x14, 0x24, 0x39, 0xe2, 0x1b, 0x84, 0xa0, 0x12, 0xe0, 0xb7, 0x2a, 0x43, 0x7c, 0x1d, + 0x73, 0x32, 0xc2, 0x5c, 0x9f, 0x67, 0xa7, 0xec, 0x88, 0x0d, 0xfa, 0x14, 0x1a, 0x32, 0x74, 0x6a, + 0x56, 0x76, 0xcb, 0xbd, 0xd6, 0x60, 0x53, 0x07, 0x44, 0x5a, 0x74, 0x12, 0x36, 0xfb, 0x10, 0xb6, + 0x0f, 0xb1, 0xf2, 0x44, 0xe0, 0xa5, 0x2a, 0x26, 0xb6, 0xeb, 0xce, 0x31, 0x77, 0x26, 0xb6, 0xeb, + 0xce, 0x31, 0x32, 0xa1, 0x2e, 0xcb, 0x8d, 0xbb, 0x53, 0x75, 0xd4, 0xd6, 0x66, 0x60, 0xde, 0x56, + 0x24, 0xe3, 0xca, 0xd2, 0xf4, 0x21, 0x54, 0xe2, 0x9b, 0xc0, 0xd5, 0xb4, 0x06, 0x48, 0xf7, 0xf3, + 0x38, 0x98, 0x11, 0x87, 0xd3, 0xf5, 0x54, 0x95, 0x97, 0x53, 0x75, 0x94, 0xb6, 0x7a, 0x40, 0x02, + 0x86, 0x03, 0xf6, 0x30, 0xff, 0x4f, 0x60, 0x27, 0x43, 0x93, 0x0c, 0x60, 0x0f, 0xea, 0xd2, 0x35, + 0xae, 0x2d, 0x17, 0x57, 0xc5, 0x65, 0xff, 0x5d, 0x82, 0xee, 0xd9, 0x62, 0xea, 0x32, 0xac, 0x48, + 0x77, 0x38, 0xf5, 0x18, 0xaa, 0xbc, 0xa3, 0x48, 0x2c, 0xd6, 0x85, 0x6e, 0xd1, 0x76, 0x0e, 0xe2, + 0x5f, 0x47, 0xd0, 0xd1, 0x53, 0xa8, 0x5d, 0xb9, 0x7e, 0x84, 0x29, 0x07, 0x22, 0x41, 0x4d, 0x72, + 0xf2, 0x76, 0xe4, 0x48, 0x0e, 0xb4, 0x0d, 0xf5, 0x69, 0x78, 0x1d, 0xf7, 0x13, 0x7e, 0x05, 0x1b, + 0x4e, 0x6d, 0x1a, 0x5e, 0x3b, 0x51, 0x80, 0xde, 0x87, 0xb5, 0xa9, 0x47, 0xdd, 0x73, 0x1f, 0x8f, + 0x2f, 0x09, 0x79, 0x43, 0xf9, 0x2d, 0x6c, 0x38, 0xab, 0xf2, 0xf0, 0x28, 0x3e, 0x43, 0x56, 0x5c, + 0x49, 0x93, 0x10, 0xbb, 0x0c, 0x9b, 0x35, 0x4e, 0x4f, 0xf6, 0x31, 0x86, 0xcc, 0x9b, 0x63, 0x12, + 0x31, 0x7e, 0x75, 0xca, 0x8e, 0xda, 0xa2, 0xf7, 0x60, 0x35, 0xc4, 0x14, 0xb3, 0xb1, 0xf4, 0xb2, + 0xc1, 0x25, 0x5b, 0xfc, 0xec, 0xb5, 0x70, 0x0b, 0x41, 0xe5, 0x57, 0xd7, 0x63, 0x66, 0x93, 0x93, + 0xf8, 0x5a, 0x88, 0x45, 0x14, 0x2b, 0x31, 0x50, 0x62, 0x11, 0xc5, 0x52, 0xac, 0x0b, 0xd5, 0x19, + 0x09, 0x27, 0xd8, 0x6c, 0x71, 0x9a, 0xd8, 0xd8, 0x47, 0xb0, 0xb9, 0x04, 0xf2, 0x43, 0xf3, 0xf5, + 0x8f, 0x01, 0x5b, 0x0e, 0xf1, 0xfd, 0x73, 0x77, 0xf2, 0xa6, 0x40, 0xc6, 0x52, 0xe0, 0x96, 0xee, + 0x06, 0xb7, 0x9c, 0x01, 0x6e, 0xaa, 0x08, 0x2b, 0x5a, 0x11, 0x6a, 0xb0, 0x57, 0xf3, 0x61, 0xaf, + 0xe9, 0xb0, 0x2b, 0x4c, 0xeb, 0x29, 0x4c, 0x13, 0xc0, 0x1a, 0x69, 0xc0, 0xbe, 0x86, 0xed, 0x5b, + 0x51, 0x3e, 0x14, 0xb2, 0x3f, 0x4a, 0xb0, 0x79, 0x1c, 0x50, 0xe6, 0xfa, 0xfe, 0x12, 0x62, 0x49, + 0x3d, 0x1b, 0x85, 0xeb, 0xb9, 0xf4, 0x5f, 0xea, 0xb9, 0xac, 0x41, 0xae, 0xf2, 0x53, 0x49, 0xe5, + 0xa7, 0x50, 0x8d, 0x6b, 0x9d, 0xa5, 0xb6, 0xd4, 0x59, 0xd0, 0x3b, 0x00, 0xa2, 0x28, 0xb9, 0x72, + 0x01, 0x6d, 0x93, 0x9f, 0x9c, 0xca, 0x46, 0xa2, 0xb2, 0xd1, 0xc8, 0xce, 0x46, 0xaa, 0xc2, 0xed, + 0x63, 0xd8, 0x5a, 0x86, 0xea, 0xa1, 0xb0, 0xff, 0x66, 0xc0, 0xf6, 0x59, 0xe0, 0x65, 0x02, 0x9f, + 0x55, 0xaa, 0xb7, 
0xa0, 0x28, 0x65, 0x40, 0xd1, 0x85, 0xea, 0x22, 0x0a, 0x2f, 0xb0, 0x84, 0x56, + 0x6c, 0xd2, 0x31, 0x56, 0xb4, 0x18, 0xed, 0x31, 0x98, 0xb7, 0x7d, 0x78, 0x60, 0x44, 0xb1, 0xd7, + 0xc9, 0x4b, 0xd0, 0x14, 0x5d, 0xdf, 0xde, 0x80, 0xf5, 0x43, 0xcc, 0x5e, 0x8b, 0x6b, 0x21, 0xc3, + 0xb3, 0x87, 0x80, 0xd2, 0x87, 0x37, 0xf6, 0xe4, 0x91, 0x6e, 0x4f, 0x8d, 0x45, 0x8a, 0x5f, 0x71, + 0xd9, 0x5f, 0x70, 0xdd, 0x47, 0x1e, 0x65, 0x24, 0xbc, 0xbe, 0x0b, 0xba, 0x0e, 0x94, 0xe7, 0xee, + 0x5b, 0xf9, 0x50, 0xc4, 0x4b, 0xfb, 0x90, 0x7b, 0x90, 0x88, 0x4a, 0x0f, 0xd2, 0xcf, 0xae, 0x51, + 0xec, 0xd9, 0xfd, 0x01, 0xd0, 0x2b, 0x9c, 0x4c, 0x00, 0xf7, 0xbc, 0x58, 0x2a, 0x09, 0x25, 0xbd, + 0xd0, 0x4c, 0xa8, 0x4f, 0x7c, 0xec, 0x06, 0xd1, 0x42, 0xa6, 0x4d, 0x6d, 0xed, 0x1f, 0x61, 0x43, + 0xd3, 0x2e, 0xfd, 0x8c, 0xe3, 0xa1, 0x17, 0x52, 0x7b, 0xbc, 0x44, 0x9f, 0x43, 0x4d, 0x8c, 0x45, + 0x5c, 0x77, 0x7b, 0xf0, 0x48, 0xf7, 0x9b, 0x2b, 0x89, 0x02, 0x39, 0x47, 0x39, 0x92, 0x77, 0xf0, + 0x57, 0x03, 0xda, 0xea, 0xa1, 0x17, 0x43, 0x1b, 0xf2, 0x60, 0x35, 0x3d, 0xd1, 0xa0, 0x27, 0xf9, + 0x33, 0xdd, 0xd2, 0x60, 0x6a, 0x3d, 0x2d, 0xc2, 0x2a, 0x22, 0xb0, 0x57, 0x3e, 0x31, 0x10, 0x85, + 0xce, 0xf2, 0xa0, 0x81, 0x9e, 0x65, 0xeb, 0xc8, 0x99, 0x6c, 0xac, 0x7e, 0x51, 0x76, 0x65, 0x16, + 0x5d, 0xf1, 0x9a, 0xd1, 0xa7, 0x03, 0x74, 0xaf, 0x1a, 0x7d, 0x20, 0xb1, 0xf6, 0x0a, 0xf3, 0x27, + 0x76, 0x7f, 0x86, 0x35, 0xed, 0x85, 0x43, 0x39, 0x68, 0x65, 0xcd, 0x1a, 0xd6, 0x47, 0x85, 0x78, + 0x13, 0x5b, 0x73, 0x68, 0xeb, 0x4d, 0x0a, 0xe5, 0x28, 0xc8, 0xec, 0xfa, 0xd6, 0xc7, 0xc5, 0x98, + 0x13, 0x73, 0x14, 0x3a, 0xcb, 0x3d, 0x24, 0x2f, 0x8f, 0x39, 0xfd, 0x2e, 0x2f, 0x8f, 0x79, 0xad, + 0xc9, 0x5e, 0x41, 0x2e, 0xc0, 0x4d, 0x0b, 0x41, 0x8f, 0x73, 0x13, 0xa2, 0x77, 0x1e, 0xab, 0x77, + 0x3f, 0x63, 0x62, 0x62, 0x01, 0xff, 0x5b, 0x7a, 0x63, 0x51, 0x0e, 0x34, 0xd9, 0x03, 0x87, 0xf5, + 0xac, 0x20, 0xf7, 0x52, 0x50, 0xb2, 0x2b, 0xdd, 0x11, 0x94, 0xde, 0xf2, 0xee, 0x08, 0x6a, 0xa9, + 0xc1, 0xd9, 0x2b, 0xc8, 0x83, 0xb6, 0x13, 0x05, 0xd2, 0x74, 0xdc, 0x16, 0x50, 0x8e, 0xf4, 0xed, + 0xae, 0x66, 0x3d, 0x29, 0xc0, 0x79, 0x73, 0xbf, 0x9f, 0xc3, 0xf7, 0x0d, 0xc5, 0x7a, 0x5e, 0xe3, + 0xff, 0x69, 0x3f, 0xfb, 0x37, 0x00, 0x00, 0xff, 0xff, 0xf3, 0x7c, 0x9c, 0x49, 0xc1, 0x0f, 0x00, + 0x00, +} diff --git a/vendor/k8s.io/helm/pkg/repo/chartrepo.go b/vendor/k8s.io/helm/pkg/repo/chartrepo.go index b95a7ae07e6..438f66d7cd7 100644 --- a/vendor/k8s.io/helm/pkg/repo/chartrepo.go +++ b/vendor/k8s.io/helm/pkg/repo/chartrepo.go @@ -36,6 +36,8 @@ type Entry struct { Name string `json:"name"` Cache string `json:"cache"` URL string `json:"url"` + Username string `json:"username"` + Password string `json:"password"` CertFile string `json:"certFile"` KeyFile string `json:"keyFile"` CAFile string `json:"caFile"` @@ -117,6 +119,8 @@ func (r *ChartRepository) DownloadIndexFile(cachePath string) error { parsedURL.Path = strings.TrimSuffix(parsedURL.Path, "/") + "/index.yaml" indexURL = parsedURL.String() + + r.setCredentials() resp, err := r.Client.Get(indexURL) if err != nil { return err @@ -145,6 +149,13 @@ func (r *ChartRepository) DownloadIndexFile(cachePath string) error { return ioutil.WriteFile(cp, index, 0644) } +// If HttpGetter is used, this method sets the configured repository credentials on the HttpGetter. +func (r *ChartRepository) setCredentials() { + if t, ok := r.Client.(*getter.HttpGetter); ok { + t.SetCredentials(r.Config.Username, r.Config.Password) + } +} + // Index generates an index for the chart repository and writes an index.yaml file. 
func (r *ChartRepository) Index() error { err := r.generateIndex() @@ -186,6 +197,13 @@ func (r *ChartRepository) generateIndex() error { // FindChartInRepoURL finds chart in chart repository pointed by repoURL // without adding repo to repositories func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { + return FindChartInAuthRepoURL(repoURL, "", "", chartName, chartVersion, certFile, keyFile, caFile, getters) +} + +// FindChartInAuthRepoURL finds chart in chart repository pointed by repoURL +// without adding repo to repositories, like FindChartInRepoURL, +// but it also receives credentials for the chart repository. +func FindChartInAuthRepoURL(repoURL, username, password, chartName, chartVersion, certFile, keyFile, caFile string, getters getter.Providers) (string, error) { // Download and write the index file to a temporary location tempIndexFile, err := ioutil.TempFile("", "tmp-repo-file") @@ -196,6 +214,8 @@ func FindChartInRepoURL(repoURL, chartName, chartVersion, certFile, keyFile, caF c := Entry{ URL: repoURL, + Username: username, + Password: password, CertFile: certFile, KeyFile: keyFile, CAFile: caFile, diff --git a/vendor/k8s.io/helm/pkg/repo/repo.go b/vendor/k8s.io/helm/pkg/repo/repo.go index 194eace79e2..b5bba164e07 100644 --- a/vendor/k8s.io/helm/pkg/repo/repo.go +++ b/vendor/k8s.io/helm/pkg/repo/repo.go @@ -31,7 +31,8 @@ import ( var ErrRepoOutOfDate = errors.New("repository file is out of date") // RepoFile represents the repositories.yaml file in $HELM_HOME -type RepoFile struct { +// TODO: change type name to File in Helm 3 to resolve linter warning +type RepoFile struct { // nolint APIVersion string `json:"apiVersion"` Generated time.Time `json:"generated"` Repositories []*Entry `json:"repositories"` diff --git a/vendor/k8s.io/helm/pkg/version/version.go b/vendor/k8s.io/helm/pkg/version/version.go index 6f5a1a45274..43f1ad40ad9 100644 --- a/vendor/k8s.io/helm/pkg/version/version.go +++ b/vendor/k8s.io/helm/pkg/version/version.go @@ -26,7 +26,7 @@ var ( // Increment major number for new feature additions and behavioral changes. // Increment minor number for bug fixes and performance enhancements. // Increment patch number for critical fixes to existing releases. - Version = "v2.8" + Version = "v2.9" // BuildMetadata is extra build time data BuildMetadata = "unreleased" diff --git a/vendor/k8s.io/kube-openapi/pkg/common/common.go b/vendor/k8s.io/kube-openapi/pkg/common/common.go index fbe01cabb3b..0d235876deb 100644 --- a/vendor/k8s.io/kube-openapi/pkg/common/common.go +++ b/vendor/k8s.io/kube-openapi/pkg/common/common.go @@ -32,7 +32,7 @@ type OpenAPIDefinition struct { type ReferenceCallback func(path string) spec.Ref -// OpenAPIDefinitions is collection of all definitions. +// GetOpenAPIDefinitions is collection of all definitions. type GetOpenAPIDefinitions func(ReferenceCallback) map[string]OpenAPIDefinition // OpenAPIDefinitionGetter gets openAPI definitions for a given type. If a type implements this interface,